]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/trace/trace.c
tracing: Rename (un)register_ftrace_event() to (un)register_trace_event()
[mirror_ubuntu-bionic-kernel.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
2cadf913 28#include <linux/kprobes.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
8bd75c77 43#include <linux/sched/rt.h>
86387f7e 44
bc0c38d1 45#include "trace.h"
f0868d1e 46#include "trace_output.h"
bc0c38d1 47
73c5162a
SR
48/*
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
55034cd6 52bool ring_buffer_expanded;
73c5162a 53
8e1b82e0
FW
54/*
55 * We need to change this state when a selftest is running.
ff32504f
FW
56 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
5e1607a0 58 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
59 * at the same time, giving false positive or negative results.
60 */
8e1b82e0 61static bool __read_mostly tracing_selftest_running;
ff32504f 62
b2821ae6
SR
63/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
020e5f85 66bool __read_mostly tracing_selftest_disabled;
b2821ae6 67
0daa2302
SRRH
68/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
adf9f195
FW
72/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static struct tracer_flags dummy_tracer_flags = {
78 .val = 0,
79 .opts = dummy_tracer_opt
80};
81
8c1a49ae
SRRH
82static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
84{
85 return 0;
86}
0f048701 87
7ffbd48d
SR
88/*
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
92 */
93static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
0f048701
SR
95/*
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
100 */
4fd27358 101static int tracing_disabled = 1;
0f048701 102
9288f99a 103DEFINE_PER_CPU(int, ftrace_cpu_disabled);
d769041f 104
955b61e5 105cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 106
944ac425
SR
107/*
108 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
109 *
110 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
111 * is set, then ftrace_dump is called. This will output the contents
112 * of the ftrace buffers to the console. This is very useful for
113 * capturing traces that lead to crashes and outputing it to a
114 * serial console.
115 *
116 * It is default off, but you can enable it with either specifying
117 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
118 * /proc/sys/kernel/ftrace_dump_on_oops
119 * Set 1 if you want to dump buffers of all CPUs
120 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 121 */
cecbca96
FW
122
123enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 124
de7edd31
SRRH
125/* When set, tracing will stop when a WARN*() is hit */
126int __disable_trace_on_warning;
127
9828413d
SRRH
128#ifdef CONFIG_TRACE_ENUM_MAP_FILE
129/* Map of enums to their values, for "enum_map" file */
130struct trace_enum_map_head {
131 struct module *mod;
132 unsigned long length;
133};
134
135union trace_enum_map_item;
136
137struct trace_enum_map_tail {
138 /*
139 * "end" is first and points to NULL as it must be different
140 * than "mod" or "enum_string"
141 */
142 union trace_enum_map_item *next;
143 const char *end; /* points to NULL */
144};
145
146static DEFINE_MUTEX(trace_enum_mutex);
147
148/*
149 * The trace_enum_maps are saved in an array with two extra elements,
150 * one at the beginning, and one at the end. The beginning item contains
151 * the count of the saved maps (head.length), and the module they
152 * belong to if not built in (head.mod). The ending item contains a
153 * pointer to the next array of saved enum_map items.
154 */
155union trace_enum_map_item {
156 struct trace_enum_map map;
157 struct trace_enum_map_head head;
158 struct trace_enum_map_tail tail;
159};
160
161static union trace_enum_map_item *trace_enum_maps;
162#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163
607e2ea1 164static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 165
ee6c2c1b
LZ
166#define MAX_TRACER_SIZE 100
167static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 168static char *default_bootup_tracer;
d9e54076 169
55034cd6
SRRH
170static bool allocate_snapshot;
171
1beee96b 172static int __init set_cmdline_ftrace(char *str)
d9e54076 173{
67012ab1 174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 175 default_bootup_tracer = bootup_tracer_buf;
73c5162a 176 /* We are using ftrace early, expand it */
55034cd6 177 ring_buffer_expanded = true;
d9e54076
PZ
178 return 1;
179}
1beee96b 180__setup("ftrace=", set_cmdline_ftrace);
d9e54076 181
944ac425
SR
182static int __init set_ftrace_dump_on_oops(char *str)
183{
cecbca96
FW
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
186 return 1;
187 }
188
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
191 return 1;
192 }
193
194 return 0;
944ac425
SR
195}
196__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 197
de7edd31
SRRH
198static int __init stop_trace_on_warning(char *str)
199{
933ff9f2
LCG
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
de7edd31
SRRH
202 return 1;
203}
933ff9f2 204__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 205
3209cff4 206static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
207{
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
211 return 1;
212}
3209cff4 213__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 214
7bcfaf54
SR
215
216static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217static char *trace_boot_options __initdata;
218
219static int __init set_trace_boot_options(char *str)
220{
67012ab1 221 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
222 trace_boot_options = trace_boot_options_buf;
223 return 0;
224}
225__setup("trace_options=", set_trace_boot_options);
226
e1e232ca
SR
227static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228static char *trace_boot_clock __initdata;
229
230static int __init set_trace_boot_clock(char *str)
231{
232 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 trace_boot_clock = trace_boot_clock_buf;
234 return 0;
235}
236__setup("trace_clock=", set_trace_boot_clock);
237
0daa2302
SRRH
238static int __init set_tracepoint_printk(char *str)
239{
240 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 tracepoint_printk = 1;
242 return 1;
243}
244__setup("tp_printk", set_tracepoint_printk);
de7edd31 245
cf8e3474 246unsigned long long ns2usecs(cycle_t nsec)
bc0c38d1
SR
247{
248 nsec += 500;
249 do_div(nsec, 1000);
250 return nsec;
251}
252
4fcdae83
SR
253/*
254 * The global_trace is the descriptor that holds the tracing
255 * buffers for the live tracing. For each CPU, it contains
256 * a link list of pages that will store trace entries. The
257 * page descriptor of the pages in the memory is used to hold
258 * the link list by linking the lru item in the page descriptor
259 * to each of the pages in the buffer per CPU.
260 *
261 * For each active CPU there is a data field that holds the
262 * pages for the buffer for that CPU. Each CPU has the same number
263 * of pages allocated for its buffer.
264 */
bc0c38d1
SR
265static struct trace_array global_trace;
266
ae63b31e 267LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 268
ff451961
SRRH
269int trace_array_get(struct trace_array *this_tr)
270{
271 struct trace_array *tr;
272 int ret = -ENODEV;
273
274 mutex_lock(&trace_types_lock);
275 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
276 if (tr == this_tr) {
277 tr->ref++;
278 ret = 0;
279 break;
280 }
281 }
282 mutex_unlock(&trace_types_lock);
283
284 return ret;
285}
286
287static void __trace_array_put(struct trace_array *this_tr)
288{
289 WARN_ON(!this_tr->ref);
290 this_tr->ref--;
291}
292
293void trace_array_put(struct trace_array *this_tr)
294{
295 mutex_lock(&trace_types_lock);
296 __trace_array_put(this_tr);
297 mutex_unlock(&trace_types_lock);
298}
299
f306cc82
TZ
300int filter_check_discard(struct ftrace_event_file *file, void *rec,
301 struct ring_buffer *buffer,
302 struct ring_buffer_event *event)
eb02ce01 303{
f306cc82
TZ
304 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
305 !filter_match_preds(file->filter, rec)) {
306 ring_buffer_discard_commit(buffer, event);
307 return 1;
308 }
309
310 return 0;
311}
312EXPORT_SYMBOL_GPL(filter_check_discard);
313
314int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
315 struct ring_buffer *buffer,
316 struct ring_buffer_event *event)
317{
318 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
319 !filter_match_preds(call->filter, rec)) {
320 ring_buffer_discard_commit(buffer, event);
321 return 1;
322 }
323
324 return 0;
eb02ce01 325}
f306cc82 326EXPORT_SYMBOL_GPL(call_filter_check_discard);
eb02ce01 327
ad1438a0 328static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
329{
330 u64 ts;
331
332 /* Early boot up does not have a buffer yet */
9457158b 333 if (!buf->buffer)
37886f6a
SR
334 return trace_clock_local();
335
9457158b
AL
336 ts = ring_buffer_time_stamp(buf->buffer, cpu);
337 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
338
339 return ts;
340}
bc0c38d1 341
9457158b
AL
342cycle_t ftrace_now(int cpu)
343{
344 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
345}
346
10246fa3
SRRH
347/**
348 * tracing_is_enabled - Show if global_trace has been disabled
349 *
350 * Shows if the global trace has been enabled or not. It uses the
351 * mirror flag "buffer_disabled" to be used in fast paths such as for
352 * the irqsoff tracer. But it may be inaccurate due to races. If you
353 * need to know the accurate state, use tracing_is_on() which is a little
354 * slower, but accurate.
355 */
9036990d
SR
356int tracing_is_enabled(void)
357{
10246fa3
SRRH
358 /*
359 * For quick access (irqsoff uses this in fast path), just
360 * return the mirror variable of the state of the ring buffer.
361 * It's a little racy, but we don't really care.
362 */
363 smp_rmb();
364 return !global_trace.buffer_disabled;
9036990d
SR
365}
366
4fcdae83 367/*
3928a8a2
SR
368 * trace_buf_size is the size in bytes that is allocated
369 * for a buffer. Note, the number of bytes is always rounded
370 * to page size.
3f5a54e3
SR
371 *
372 * This number is purposely set to a low number of 16384.
373 * If the dump on oops happens, it will be much appreciated
374 * to not have to wait for all that output. Anyway this can be
375 * boot time and run time configurable.
4fcdae83 376 */
3928a8a2 377#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 378
3928a8a2 379static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 380
4fcdae83 381/* trace_types holds a link list of available tracers. */
bc0c38d1 382static struct tracer *trace_types __read_mostly;
4fcdae83 383
4fcdae83
SR
384/*
385 * trace_types_lock is used to protect the trace_types list.
4fcdae83 386 */
a8227415 387DEFINE_MUTEX(trace_types_lock);
4fcdae83 388
7e53bd42
LJ
389/*
390 * serialize the access of the ring buffer
391 *
392 * ring buffer serializes readers, but it is low level protection.
393 * The validity of the events (which returns by ring_buffer_peek() ..etc)
394 * are not protected by ring buffer.
395 *
396 * The content of events may become garbage if we allow other process consumes
397 * these events concurrently:
398 * A) the page of the consumed events may become a normal page
399 * (not reader page) in ring buffer, and this page will be rewrited
400 * by events producer.
401 * B) The page of the consumed events may become a page for splice_read,
402 * and this page will be returned to system.
403 *
404 * These primitives allow multi process access to different cpu ring buffer
405 * concurrently.
406 *
407 * These primitives don't distinguish read-only and read-consume access.
408 * Multi read-only access are also serialized.
409 */
410
411#ifdef CONFIG_SMP
412static DECLARE_RWSEM(all_cpu_access_lock);
413static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
414
415static inline void trace_access_lock(int cpu)
416{
ae3b5093 417 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
418 /* gain it for accessing the whole ring buffer. */
419 down_write(&all_cpu_access_lock);
420 } else {
421 /* gain it for accessing a cpu ring buffer. */
422
ae3b5093 423 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
424 down_read(&all_cpu_access_lock);
425
426 /* Secondly block other access to this @cpu ring buffer. */
427 mutex_lock(&per_cpu(cpu_access_lock, cpu));
428 }
429}
430
431static inline void trace_access_unlock(int cpu)
432{
ae3b5093 433 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
434 up_write(&all_cpu_access_lock);
435 } else {
436 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
437 up_read(&all_cpu_access_lock);
438 }
439}
440
441static inline void trace_access_lock_init(void)
442{
443 int cpu;
444
445 for_each_possible_cpu(cpu)
446 mutex_init(&per_cpu(cpu_access_lock, cpu));
447}
448
449#else
450
451static DEFINE_MUTEX(access_lock);
452
453static inline void trace_access_lock(int cpu)
454{
455 (void)cpu;
456 mutex_lock(&access_lock);
457}
458
459static inline void trace_access_unlock(int cpu)
460{
461 (void)cpu;
462 mutex_unlock(&access_lock);
463}
464
465static inline void trace_access_lock_init(void)
466{
467}
468
469#endif
470
ee6bce52 471/* trace_flags holds trace_options default values */
12ef7d44 472unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
a2a16d6a 473 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
77271ce4 474 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
328df475 475 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
e7e2ee89 476
5280bcef 477static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
478{
479 if (tr->trace_buffer.buffer)
480 ring_buffer_record_on(tr->trace_buffer.buffer);
481 /*
482 * This flag is looked at when buffers haven't been allocated
483 * yet, or by some tracers (like irqsoff), that just want to
484 * know if the ring buffer has been disabled, but it can handle
485 * races of where it gets disabled but we still do a record.
486 * As the check is in the fast path of the tracers, it is more
487 * important to be fast than accurate.
488 */
489 tr->buffer_disabled = 0;
490 /* Make the flag seen by readers */
491 smp_wmb();
492}
493
499e5470
SR
494/**
495 * tracing_on - enable tracing buffers
496 *
497 * This function enables tracing buffers that may have been
498 * disabled with tracing_off.
499 */
500void tracing_on(void)
501{
10246fa3 502 tracer_tracing_on(&global_trace);
499e5470
SR
503}
504EXPORT_SYMBOL_GPL(tracing_on);
505
09ae7234
SRRH
506/**
507 * __trace_puts - write a constant string into the trace buffer.
508 * @ip: The address of the caller
509 * @str: The constant string to write
510 * @size: The size of the string.
511 */
512int __trace_puts(unsigned long ip, const char *str, int size)
513{
514 struct ring_buffer_event *event;
515 struct ring_buffer *buffer;
516 struct print_entry *entry;
517 unsigned long irq_flags;
518 int alloc;
8abfb872
J
519 int pc;
520
f0160a5a
J
521 if (!(trace_flags & TRACE_ITER_PRINTK))
522 return 0;
523
8abfb872 524 pc = preempt_count();
09ae7234 525
3132e107
SRRH
526 if (unlikely(tracing_selftest_running || tracing_disabled))
527 return 0;
528
09ae7234
SRRH
529 alloc = sizeof(*entry) + size + 2; /* possible \n added */
530
531 local_save_flags(irq_flags);
532 buffer = global_trace.trace_buffer.buffer;
533 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
8abfb872 534 irq_flags, pc);
09ae7234
SRRH
535 if (!event)
536 return 0;
537
538 entry = ring_buffer_event_data(event);
539 entry->ip = ip;
540
541 memcpy(&entry->buf, str, size);
542
543 /* Add a newline if necessary */
544 if (entry->buf[size - 1] != '\n') {
545 entry->buf[size] = '\n';
546 entry->buf[size + 1] = '\0';
547 } else
548 entry->buf[size] = '\0';
549
550 __buffer_unlock_commit(buffer, event);
8abfb872 551 ftrace_trace_stack(buffer, irq_flags, 4, pc);
09ae7234
SRRH
552
553 return size;
554}
555EXPORT_SYMBOL_GPL(__trace_puts);
556
557/**
558 * __trace_bputs - write the pointer to a constant string into trace buffer
559 * @ip: The address of the caller
560 * @str: The constant string to write to the buffer to
561 */
562int __trace_bputs(unsigned long ip, const char *str)
563{
564 struct ring_buffer_event *event;
565 struct ring_buffer *buffer;
566 struct bputs_entry *entry;
567 unsigned long irq_flags;
568 int size = sizeof(struct bputs_entry);
8abfb872
J
569 int pc;
570
f0160a5a
J
571 if (!(trace_flags & TRACE_ITER_PRINTK))
572 return 0;
573
8abfb872 574 pc = preempt_count();
09ae7234 575
3132e107
SRRH
576 if (unlikely(tracing_selftest_running || tracing_disabled))
577 return 0;
578
09ae7234
SRRH
579 local_save_flags(irq_flags);
580 buffer = global_trace.trace_buffer.buffer;
581 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
8abfb872 582 irq_flags, pc);
09ae7234
SRRH
583 if (!event)
584 return 0;
585
586 entry = ring_buffer_event_data(event);
587 entry->ip = ip;
588 entry->str = str;
589
590 __buffer_unlock_commit(buffer, event);
8abfb872 591 ftrace_trace_stack(buffer, irq_flags, 4, pc);
09ae7234
SRRH
592
593 return 1;
594}
595EXPORT_SYMBOL_GPL(__trace_bputs);
596
ad909e21
SRRH
597#ifdef CONFIG_TRACER_SNAPSHOT
598/**
599 * trace_snapshot - take a snapshot of the current buffer.
600 *
601 * This causes a swap between the snapshot buffer and the current live
602 * tracing buffer. You can use this to take snapshots of the live
603 * trace when some condition is triggered, but continue to trace.
604 *
605 * Note, make sure to allocate the snapshot with either
606 * a tracing_snapshot_alloc(), or by doing it manually
607 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
608 *
609 * If the snapshot buffer is not allocated, it will stop tracing.
610 * Basically making a permanent snapshot.
611 */
612void tracing_snapshot(void)
613{
614 struct trace_array *tr = &global_trace;
615 struct tracer *tracer = tr->current_trace;
616 unsigned long flags;
617
1b22e382
SRRH
618 if (in_nmi()) {
619 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
620 internal_trace_puts("*** snapshot is being ignored ***\n");
621 return;
622 }
623
ad909e21 624 if (!tr->allocated_snapshot) {
ca268da6
SRRH
625 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
626 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
627 tracing_off();
628 return;
629 }
630
631 /* Note, snapshot can not be used when the tracer uses it */
632 if (tracer->use_max_tr) {
ca268da6
SRRH
633 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
634 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
635 return;
636 }
637
638 local_irq_save(flags);
639 update_max_tr(tr, current, smp_processor_id());
640 local_irq_restore(flags);
641}
1b22e382 642EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
643
644static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
645 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
646static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
647
648static int alloc_snapshot(struct trace_array *tr)
649{
650 int ret;
651
652 if (!tr->allocated_snapshot) {
653
654 /* allocate spare buffer */
655 ret = resize_buffer_duplicate_size(&tr->max_buffer,
656 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
657 if (ret < 0)
658 return ret;
659
660 tr->allocated_snapshot = true;
661 }
662
663 return 0;
664}
665
ad1438a0 666static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
667{
668 /*
669 * We don't free the ring buffer. instead, resize it because
670 * The max_tr ring buffer has some state (e.g. ring->clock) and
671 * we want preserve it.
672 */
673 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
674 set_buffer_entries(&tr->max_buffer, 1);
675 tracing_reset_online_cpus(&tr->max_buffer);
676 tr->allocated_snapshot = false;
677}
ad909e21 678
93e31ffb
TZ
679/**
680 * tracing_alloc_snapshot - allocate snapshot buffer.
681 *
682 * This only allocates the snapshot buffer if it isn't already
683 * allocated - it doesn't also take a snapshot.
684 *
685 * This is meant to be used in cases where the snapshot buffer needs
686 * to be set up for events that can't sleep but need to be able to
687 * trigger a snapshot.
688 */
689int tracing_alloc_snapshot(void)
690{
691 struct trace_array *tr = &global_trace;
692 int ret;
693
694 ret = alloc_snapshot(tr);
695 WARN_ON(ret < 0);
696
697 return ret;
698}
699EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
700
ad909e21
SRRH
701/**
702 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
703 *
704 * This is similar to trace_snapshot(), but it will allocate the
705 * snapshot buffer if it isn't already allocated. Use this only
706 * where it is safe to sleep, as the allocation may sleep.
707 *
708 * This causes a swap between the snapshot buffer and the current live
709 * tracing buffer. You can use this to take snapshots of the live
710 * trace when some condition is triggered, but continue to trace.
711 */
712void tracing_snapshot_alloc(void)
713{
ad909e21
SRRH
714 int ret;
715
93e31ffb
TZ
716 ret = tracing_alloc_snapshot();
717 if (ret < 0)
3209cff4 718 return;
ad909e21
SRRH
719
720 tracing_snapshot();
721}
1b22e382 722EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
723#else
724void tracing_snapshot(void)
725{
726 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
727}
1b22e382 728EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
729int tracing_alloc_snapshot(void)
730{
731 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
732 return -ENODEV;
733}
734EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
735void tracing_snapshot_alloc(void)
736{
737 /* Give warning */
738 tracing_snapshot();
739}
1b22e382 740EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
741#endif /* CONFIG_TRACER_SNAPSHOT */
742
5280bcef 743static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
744{
745 if (tr->trace_buffer.buffer)
746 ring_buffer_record_off(tr->trace_buffer.buffer);
747 /*
748 * This flag is looked at when buffers haven't been allocated
749 * yet, or by some tracers (like irqsoff), that just want to
750 * know if the ring buffer has been disabled, but it can handle
751 * races of where it gets disabled but we still do a record.
752 * As the check is in the fast path of the tracers, it is more
753 * important to be fast than accurate.
754 */
755 tr->buffer_disabled = 1;
756 /* Make the flag seen by readers */
757 smp_wmb();
758}
759
499e5470
SR
760/**
761 * tracing_off - turn off tracing buffers
762 *
763 * This function stops the tracing buffers from recording data.
764 * It does not disable any overhead the tracers themselves may
765 * be causing. This function simply causes all recording to
766 * the ring buffers to fail.
767 */
768void tracing_off(void)
769{
10246fa3 770 tracer_tracing_off(&global_trace);
499e5470
SR
771}
772EXPORT_SYMBOL_GPL(tracing_off);
773
de7edd31
SRRH
774void disable_trace_on_warning(void)
775{
776 if (__disable_trace_on_warning)
777 tracing_off();
778}
779
10246fa3
SRRH
780/**
781 * tracer_tracing_is_on - show real state of ring buffer enabled
782 * @tr : the trace array to know if ring buffer is enabled
783 *
784 * Shows real state of the ring buffer if it is enabled or not.
785 */
5280bcef 786static int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
787{
788 if (tr->trace_buffer.buffer)
789 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
790 return !tr->buffer_disabled;
791}
792
499e5470
SR
793/**
794 * tracing_is_on - show state of ring buffers enabled
795 */
796int tracing_is_on(void)
797{
10246fa3 798 return tracer_tracing_is_on(&global_trace);
499e5470
SR
799}
800EXPORT_SYMBOL_GPL(tracing_is_on);
801
3928a8a2 802static int __init set_buf_size(char *str)
bc0c38d1 803{
3928a8a2 804 unsigned long buf_size;
c6caeeb1 805
bc0c38d1
SR
806 if (!str)
807 return 0;
9d612bef 808 buf_size = memparse(str, &str);
c6caeeb1 809 /* nr_entries can not be zero */
9d612bef 810 if (buf_size == 0)
c6caeeb1 811 return 0;
3928a8a2 812 trace_buf_size = buf_size;
bc0c38d1
SR
813 return 1;
814}
3928a8a2 815__setup("trace_buf_size=", set_buf_size);
bc0c38d1 816
0e950173
TB
817static int __init set_tracing_thresh(char *str)
818{
87abb3b1 819 unsigned long threshold;
0e950173
TB
820 int ret;
821
822 if (!str)
823 return 0;
bcd83ea6 824 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
825 if (ret < 0)
826 return 0;
87abb3b1 827 tracing_thresh = threshold * 1000;
0e950173
TB
828 return 1;
829}
830__setup("tracing_thresh=", set_tracing_thresh);
831
57f50be1
SR
832unsigned long nsecs_to_usecs(unsigned long nsecs)
833{
834 return nsecs / 1000;
835}
836
4fcdae83 837/* These must match the bit postions in trace_iterator_flags */
bc0c38d1
SR
838static const char *trace_options[] = {
839 "print-parent",
840 "sym-offset",
841 "sym-addr",
842 "verbose",
f9896bf3 843 "raw",
5e3ca0ec 844 "hex",
cb0f12aa 845 "bin",
2a2cc8f7 846 "block",
86387f7e 847 "stacktrace",
5e1607a0 848 "trace_printk",
b2a866f9 849 "ftrace_preempt",
9f029e83 850 "branch",
12ef7d44 851 "annotate",
02b67518 852 "userstacktrace",
b54d3de9 853 "sym-userobj",
66896a85 854 "printk-msg-only",
c4a8e8be 855 "context-info",
c032ef64 856 "latency-format",
be6f164a 857 "sleep-time",
a2a16d6a 858 "graph-time",
e870e9a1 859 "record-cmd",
750912fa 860 "overwrite",
cf30cf67 861 "disable_on_free",
77271ce4 862 "irq-info",
5224c3a3 863 "markers",
328df475 864 "function-trace",
bc0c38d1
SR
865 NULL
866};
867
5079f326
Z
868static struct {
869 u64 (*func)(void);
870 const char *name;
8be0709f 871 int in_ns; /* is this clock in nanoseconds? */
5079f326 872} trace_clocks[] = {
1b3e5c09
TG
873 { trace_clock_local, "local", 1 },
874 { trace_clock_global, "global", 1 },
875 { trace_clock_counter, "counter", 0 },
e7fda6c4 876 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
877 { trace_clock, "perf", 1 },
878 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 879 { ktime_get_raw_fast_ns, "mono_raw", 1 },
8cbd9cc6 880 ARCH_TRACE_CLOCKS
5079f326
Z
881};
882
b63f39ea 883/*
884 * trace_parser_get_init - gets the buffer for trace parser
885 */
886int trace_parser_get_init(struct trace_parser *parser, int size)
887{
888 memset(parser, 0, sizeof(*parser));
889
890 parser->buffer = kmalloc(size, GFP_KERNEL);
891 if (!parser->buffer)
892 return 1;
893
894 parser->size = size;
895 return 0;
896}
897
898/*
899 * trace_parser_put - frees the buffer for trace parser
900 */
901void trace_parser_put(struct trace_parser *parser)
902{
903 kfree(parser->buffer);
904}
905
906/*
907 * trace_get_user - reads the user input string separated by space
908 * (matched by isspace(ch))
909 *
910 * For each string found the 'struct trace_parser' is updated,
911 * and the function returns.
912 *
913 * Returns number of bytes read.
914 *
915 * See kernel/trace/trace.h for 'struct trace_parser' details.
916 */
917int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
918 size_t cnt, loff_t *ppos)
919{
920 char ch;
921 size_t read = 0;
922 ssize_t ret;
923
924 if (!*ppos)
925 trace_parser_clear(parser);
926
927 ret = get_user(ch, ubuf++);
928 if (ret)
929 goto out;
930
931 read++;
932 cnt--;
933
934 /*
935 * The parser is not finished with the last write,
936 * continue reading the user input without skipping spaces.
937 */
938 if (!parser->cont) {
939 /* skip white space */
940 while (cnt && isspace(ch)) {
941 ret = get_user(ch, ubuf++);
942 if (ret)
943 goto out;
944 read++;
945 cnt--;
946 }
947
948 /* only spaces were written */
949 if (isspace(ch)) {
950 *ppos += read;
951 ret = read;
952 goto out;
953 }
954
955 parser->idx = 0;
956 }
957
958 /* read the non-space input */
959 while (cnt && !isspace(ch)) {
3c235a33 960 if (parser->idx < parser->size - 1)
b63f39ea 961 parser->buffer[parser->idx++] = ch;
962 else {
963 ret = -EINVAL;
964 goto out;
965 }
966 ret = get_user(ch, ubuf++);
967 if (ret)
968 goto out;
969 read++;
970 cnt--;
971 }
972
973 /* We either got finished input or we have to wait for another call. */
974 if (isspace(ch)) {
975 parser->buffer[parser->idx] = 0;
976 parser->cont = false;
057db848 977 } else if (parser->idx < parser->size - 1) {
b63f39ea 978 parser->cont = true;
979 parser->buffer[parser->idx++] = ch;
057db848
SR
980 } else {
981 ret = -EINVAL;
982 goto out;
b63f39ea 983 }
984
985 *ppos += read;
986 ret = read;
987
988out:
989 return ret;
990}
991
3a161d99 992/* TODO add a seq_buf_to_buffer() */
b8b94265 993static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
994{
995 int len;
3c56819b 996
5ac48378 997 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
998 return -EBUSY;
999
5ac48378 1000 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1001 if (cnt > len)
1002 cnt = len;
3a161d99 1003 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1004
3a161d99 1005 s->seq.readpos += cnt;
3c56819b
EGM
1006 return cnt;
1007}
1008
0e950173
TB
1009unsigned long __read_mostly tracing_thresh;
1010
5d4a9dba 1011#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1012/*
1013 * Copy the new maximum trace into the separate maximum-trace
1014 * structure. (this way the maximum trace is permanently saved,
1015 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1016 */
1017static void
1018__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1019{
12883efb
SRRH
1020 struct trace_buffer *trace_buf = &tr->trace_buffer;
1021 struct trace_buffer *max_buf = &tr->max_buffer;
1022 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1023 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1024
12883efb
SRRH
1025 max_buf->cpu = cpu;
1026 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1027
6d9b3fa5 1028 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1029 max_data->critical_start = data->critical_start;
1030 max_data->critical_end = data->critical_end;
5d4a9dba 1031
1acaa1b2 1032 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1033 max_data->pid = tsk->pid;
f17a5194
SRRH
1034 /*
1035 * If tsk == current, then use current_uid(), as that does not use
1036 * RCU. The irq tracer can be called out of RCU scope.
1037 */
1038 if (tsk == current)
1039 max_data->uid = current_uid();
1040 else
1041 max_data->uid = task_uid(tsk);
1042
8248ac05
SR
1043 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1044 max_data->policy = tsk->policy;
1045 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1046
1047 /* record this tasks comm */
1048 tracing_record_cmdline(tsk);
1049}
1050
4fcdae83
SR
1051/**
1052 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1053 * @tr: tracer
1054 * @tsk: the task with the latency
1055 * @cpu: The cpu that initiated the trace.
1056 *
1057 * Flip the buffers between the @tr and the max_tr and record information
1058 * about which task was the cause of this latency.
1059 */
e309b41d 1060void
bc0c38d1
SR
1061update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1062{
2721e72d 1063 struct ring_buffer *buf;
bc0c38d1 1064
2b6080f2 1065 if (tr->stop_count)
b8de7bd1
SR
1066 return;
1067
4c11d7ae 1068 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1069
45ad21ca 1070 if (!tr->allocated_snapshot) {
debdd57f 1071 /* Only the nop tracer should hit this when disabling */
2b6080f2 1072 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1073 return;
debdd57f 1074 }
34600f0e 1075
0b9b12c1 1076 arch_spin_lock(&tr->max_lock);
3928a8a2 1077
12883efb
SRRH
1078 buf = tr->trace_buffer.buffer;
1079 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1080 tr->max_buffer.buffer = buf;
3928a8a2 1081
bc0c38d1 1082 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1083 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1084}
1085
1086/**
1087 * update_max_tr_single - only copy one trace over, and reset the rest
1088 * @tr - tracer
1089 * @tsk - task with the latency
1090 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1091 *
1092 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1093 */
e309b41d 1094void
bc0c38d1
SR
1095update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1096{
3928a8a2 1097 int ret;
bc0c38d1 1098
2b6080f2 1099 if (tr->stop_count)
b8de7bd1
SR
1100 return;
1101
4c11d7ae 1102 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1103 if (!tr->allocated_snapshot) {
2930e04d 1104 /* Only the nop tracer should hit this when disabling */
9e8529af 1105 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1106 return;
2930e04d 1107 }
ef710e10 1108
0b9b12c1 1109 arch_spin_lock(&tr->max_lock);
bc0c38d1 1110
12883efb 1111 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1112
e8165dbb
SR
1113 if (ret == -EBUSY) {
1114 /*
1115 * We failed to swap the buffer due to a commit taking
1116 * place on this CPU. We fail to record, but we reset
1117 * the max trace buffer (no one writes directly to it)
1118 * and flag that it failed.
1119 */
12883efb 1120 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1121 "Failed to swap buffers due to commit in progress\n");
1122 }
1123
e8165dbb 1124 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1125
1126 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1127 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1128}
5d4a9dba 1129#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1130
e30f53aa 1131static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1132{
15693458
SRRH
1133 /* Iterators are static, they should be filled or empty */
1134 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1135 return 0;
0d5c6e1c 1136
e30f53aa
RV
1137 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1138 full);
0d5c6e1c
SR
1139}
1140
f4e781c0
SRRH
1141#ifdef CONFIG_FTRACE_STARTUP_TEST
1142static int run_tracer_selftest(struct tracer *type)
1143{
1144 struct trace_array *tr = &global_trace;
1145 struct tracer *saved_tracer = tr->current_trace;
1146 int ret;
0d5c6e1c 1147
f4e781c0
SRRH
1148 if (!type->selftest || tracing_selftest_disabled)
1149 return 0;
0d5c6e1c
SR
1150
1151 /*
f4e781c0
SRRH
1152 * Run a selftest on this tracer.
1153 * Here we reset the trace buffer, and set the current
1154 * tracer to be this tracer. The tracer can then run some
1155 * internal tracing to verify that everything is in order.
1156 * If we fail, we do not register this tracer.
0d5c6e1c 1157 */
f4e781c0 1158 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1159
f4e781c0
SRRH
1160 tr->current_trace = type;
1161
1162#ifdef CONFIG_TRACER_MAX_TRACE
1163 if (type->use_max_tr) {
1164 /* If we expanded the buffers, make sure the max is expanded too */
1165 if (ring_buffer_expanded)
1166 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1167 RING_BUFFER_ALL_CPUS);
1168 tr->allocated_snapshot = true;
1169 }
1170#endif
1171
1172 /* the test is responsible for initializing and enabling */
1173 pr_info("Testing tracer %s: ", type->name);
1174 ret = type->selftest(type, tr);
1175 /* the test is responsible for resetting too */
1176 tr->current_trace = saved_tracer;
1177 if (ret) {
1178 printk(KERN_CONT "FAILED!\n");
1179 /* Add the warning after printing 'FAILED' */
1180 WARN_ON(1);
1181 return -1;
1182 }
1183 /* Only reset on passing, to avoid touching corrupted buffers */
1184 tracing_reset_online_cpus(&tr->trace_buffer);
1185
1186#ifdef CONFIG_TRACER_MAX_TRACE
1187 if (type->use_max_tr) {
1188 tr->allocated_snapshot = false;
0d5c6e1c 1189
f4e781c0
SRRH
1190 /* Shrink the max buffer again */
1191 if (ring_buffer_expanded)
1192 ring_buffer_resize(tr->max_buffer.buffer, 1,
1193 RING_BUFFER_ALL_CPUS);
1194 }
1195#endif
1196
1197 printk(KERN_CONT "PASSED\n");
1198 return 0;
1199}
1200#else
1201static inline int run_tracer_selftest(struct tracer *type)
1202{
1203 return 0;
0d5c6e1c 1204}
f4e781c0 1205#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1206
4fcdae83
SR
1207/**
1208 * register_tracer - register a tracer with the ftrace system.
1209 * @type - the plugin for the tracer
1210 *
1211 * Register a new plugin tracer.
1212 */
bc0c38d1
SR
1213int register_tracer(struct tracer *type)
1214{
1215 struct tracer *t;
bc0c38d1
SR
1216 int ret = 0;
1217
1218 if (!type->name) {
1219 pr_info("Tracer must have a name\n");
1220 return -1;
1221 }
1222
24a461d5 1223 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1224 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1225 return -1;
1226 }
1227
bc0c38d1 1228 mutex_lock(&trace_types_lock);
86fa2f60 1229
8e1b82e0
FW
1230 tracing_selftest_running = true;
1231
bc0c38d1
SR
1232 for (t = trace_types; t; t = t->next) {
1233 if (strcmp(type->name, t->name) == 0) {
1234 /* already found */
ee6c2c1b 1235 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1236 type->name);
1237 ret = -1;
1238 goto out;
1239 }
1240 }
1241
adf9f195
FW
1242 if (!type->set_flag)
1243 type->set_flag = &dummy_set_flag;
1244 if (!type->flags)
1245 type->flags = &dummy_tracer_flags;
1246 else
1247 if (!type->flags->opts)
1248 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1249
f4e781c0
SRRH
1250 ret = run_tracer_selftest(type);
1251 if (ret < 0)
1252 goto out;
60a11774 1253
bc0c38d1
SR
1254 type->next = trace_types;
1255 trace_types = type;
60a11774 1256
bc0c38d1 1257 out:
8e1b82e0 1258 tracing_selftest_running = false;
bc0c38d1
SR
1259 mutex_unlock(&trace_types_lock);
1260
dac74940
SR
1261 if (ret || !default_bootup_tracer)
1262 goto out_unlock;
1263
ee6c2c1b 1264 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1265 goto out_unlock;
1266
1267 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1268 /* Do we want this tracer to start on bootup? */
607e2ea1 1269 tracing_set_tracer(&global_trace, type->name);
dac74940
SR
1270 default_bootup_tracer = NULL;
1271 /* disable other selftests, since this will break it. */
55034cd6 1272 tracing_selftest_disabled = true;
b2821ae6 1273#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1274 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1275 type->name);
b2821ae6 1276#endif
b2821ae6 1277
dac74940 1278 out_unlock:
bc0c38d1
SR
1279 return ret;
1280}
1281
12883efb 1282void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1283{
12883efb 1284 struct ring_buffer *buffer = buf->buffer;
f633903a 1285
a5416411
HT
1286 if (!buffer)
1287 return;
1288
f633903a
SR
1289 ring_buffer_record_disable(buffer);
1290
1291 /* Make sure all commits have finished */
1292 synchronize_sched();
68179686 1293 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1294
1295 ring_buffer_record_enable(buffer);
1296}
1297
12883efb 1298void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1299{
12883efb 1300 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1301 int cpu;
1302
a5416411
HT
1303 if (!buffer)
1304 return;
1305
621968cd
SR
1306 ring_buffer_record_disable(buffer);
1307
1308 /* Make sure all commits have finished */
1309 synchronize_sched();
1310
9457158b 1311 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1312
1313 for_each_online_cpu(cpu)
68179686 1314 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1315
1316 ring_buffer_record_enable(buffer);
213cc060
PE
1317}
1318
09d8091c 1319/* Must have trace_types_lock held */
873c642f 1320void tracing_reset_all_online_cpus(void)
9456f0fa 1321{
873c642f
SRRH
1322 struct trace_array *tr;
1323
873c642f 1324 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1325 tracing_reset_online_cpus(&tr->trace_buffer);
1326#ifdef CONFIG_TRACER_MAX_TRACE
1327 tracing_reset_online_cpus(&tr->max_buffer);
1328#endif
873c642f 1329 }
9456f0fa
SR
1330}
1331
939c7a4f 1332#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1333#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1334static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1335struct saved_cmdlines_buffer {
1336 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1337 unsigned *map_cmdline_to_pid;
1338 unsigned cmdline_num;
1339 int cmdline_idx;
1340 char *saved_cmdlines;
1341};
1342static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1343
25b0b44a 1344/* temporary disable recording */
4fd27358 1345static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1346
939c7a4f
YY
1347static inline char *get_saved_cmdlines(int idx)
1348{
1349 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1350}
1351
1352static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1353{
939c7a4f
YY
1354 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1355}
1356
1357static int allocate_cmdlines_buffer(unsigned int val,
1358 struct saved_cmdlines_buffer *s)
1359{
1360 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1361 GFP_KERNEL);
1362 if (!s->map_cmdline_to_pid)
1363 return -ENOMEM;
1364
1365 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1366 if (!s->saved_cmdlines) {
1367 kfree(s->map_cmdline_to_pid);
1368 return -ENOMEM;
1369 }
1370
1371 s->cmdline_idx = 0;
1372 s->cmdline_num = val;
1373 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1374 sizeof(s->map_pid_to_cmdline));
1375 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1376 val * sizeof(*s->map_cmdline_to_pid));
1377
1378 return 0;
1379}
1380
1381static int trace_create_savedcmd(void)
1382{
1383 int ret;
1384
a6af8fbf 1385 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1386 if (!savedcmd)
1387 return -ENOMEM;
1388
1389 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1390 if (ret < 0) {
1391 kfree(savedcmd);
1392 savedcmd = NULL;
1393 return -ENOMEM;
1394 }
1395
1396 return 0;
bc0c38d1
SR
1397}
1398
b5130b1e
CE
1399int is_tracing_stopped(void)
1400{
2b6080f2 1401 return global_trace.stop_count;
b5130b1e
CE
1402}
1403
0f048701
SR
1404/**
1405 * tracing_start - quick start of the tracer
1406 *
1407 * If tracing is enabled but was stopped by tracing_stop,
1408 * this will start the tracer back up.
1409 */
1410void tracing_start(void)
1411{
1412 struct ring_buffer *buffer;
1413 unsigned long flags;
1414
1415 if (tracing_disabled)
1416 return;
1417
2b6080f2
SR
1418 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1419 if (--global_trace.stop_count) {
1420 if (global_trace.stop_count < 0) {
b06a8301
SR
1421 /* Someone screwed up their debugging */
1422 WARN_ON_ONCE(1);
2b6080f2 1423 global_trace.stop_count = 0;
b06a8301 1424 }
0f048701
SR
1425 goto out;
1426 }
1427
a2f80714 1428 /* Prevent the buffers from switching */
0b9b12c1 1429 arch_spin_lock(&global_trace.max_lock);
0f048701 1430
12883efb 1431 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1432 if (buffer)
1433 ring_buffer_record_enable(buffer);
1434
12883efb
SRRH
1435#ifdef CONFIG_TRACER_MAX_TRACE
1436 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1437 if (buffer)
1438 ring_buffer_record_enable(buffer);
12883efb 1439#endif
0f048701 1440
0b9b12c1 1441 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1442
0f048701 1443 out:
2b6080f2
SR
1444 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1445}
1446
1447static void tracing_start_tr(struct trace_array *tr)
1448{
1449 struct ring_buffer *buffer;
1450 unsigned long flags;
1451
1452 if (tracing_disabled)
1453 return;
1454
1455 /* If global, we need to also start the max tracer */
1456 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1457 return tracing_start();
1458
1459 raw_spin_lock_irqsave(&tr->start_lock, flags);
1460
1461 if (--tr->stop_count) {
1462 if (tr->stop_count < 0) {
1463 /* Someone screwed up their debugging */
1464 WARN_ON_ONCE(1);
1465 tr->stop_count = 0;
1466 }
1467 goto out;
1468 }
1469
12883efb 1470 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1471 if (buffer)
1472 ring_buffer_record_enable(buffer);
1473
1474 out:
1475 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1476}
1477
1478/**
1479 * tracing_stop - quick stop of the tracer
1480 *
1481 * Light weight way to stop tracing. Use in conjunction with
1482 * tracing_start.
1483 */
1484void tracing_stop(void)
1485{
1486 struct ring_buffer *buffer;
1487 unsigned long flags;
1488
2b6080f2
SR
1489 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1490 if (global_trace.stop_count++)
0f048701
SR
1491 goto out;
1492
a2f80714 1493 /* Prevent the buffers from switching */
0b9b12c1 1494 arch_spin_lock(&global_trace.max_lock);
a2f80714 1495
12883efb 1496 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1497 if (buffer)
1498 ring_buffer_record_disable(buffer);
1499
12883efb
SRRH
1500#ifdef CONFIG_TRACER_MAX_TRACE
1501 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1502 if (buffer)
1503 ring_buffer_record_disable(buffer);
12883efb 1504#endif
0f048701 1505
0b9b12c1 1506 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1507
0f048701 1508 out:
2b6080f2
SR
1509 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1510}
1511
1512static void tracing_stop_tr(struct trace_array *tr)
1513{
1514 struct ring_buffer *buffer;
1515 unsigned long flags;
1516
1517 /* If global, we need to also stop the max tracer */
1518 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1519 return tracing_stop();
1520
1521 raw_spin_lock_irqsave(&tr->start_lock, flags);
1522 if (tr->stop_count++)
1523 goto out;
1524
12883efb 1525 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1526 if (buffer)
1527 ring_buffer_record_disable(buffer);
1528
1529 out:
1530 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1531}
1532
e309b41d 1533void trace_stop_cmdline_recording(void);
bc0c38d1 1534
379cfdac 1535static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1536{
a635cf04 1537 unsigned pid, idx;
bc0c38d1
SR
1538
1539 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1540 return 0;
bc0c38d1
SR
1541
1542 /*
1543 * It's not the end of the world if we don't get
1544 * the lock, but we also don't want to spin
1545 * nor do we want to disable interrupts,
1546 * so if we miss here, then better luck next time.
1547 */
0199c4e6 1548 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1549 return 0;
bc0c38d1 1550
939c7a4f 1551 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1552 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1553 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1554
a635cf04
CE
1555 /*
1556 * Check whether the cmdline buffer at idx has a pid
1557 * mapped. We are going to overwrite that entry so we
1558 * need to clear the map_pid_to_cmdline. Otherwise we
1559 * would read the new comm for the old pid.
1560 */
939c7a4f 1561 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1562 if (pid != NO_CMDLINE_MAP)
939c7a4f 1563 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1564
939c7a4f
YY
1565 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1566 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1567
939c7a4f 1568 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1569 }
1570
939c7a4f 1571 set_cmdline(idx, tsk->comm);
bc0c38d1 1572
0199c4e6 1573 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1574
1575 return 1;
bc0c38d1
SR
1576}
1577
4c27e756 1578static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1579{
bc0c38d1
SR
1580 unsigned map;
1581
4ca53085
SR
1582 if (!pid) {
1583 strcpy(comm, "<idle>");
1584 return;
1585 }
bc0c38d1 1586
74bf4076
SR
1587 if (WARN_ON_ONCE(pid < 0)) {
1588 strcpy(comm, "<XXX>");
1589 return;
1590 }
1591
4ca53085
SR
1592 if (pid > PID_MAX_DEFAULT) {
1593 strcpy(comm, "<...>");
1594 return;
1595 }
bc0c38d1 1596
939c7a4f 1597 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1598 if (map != NO_CMDLINE_MAP)
939c7a4f 1599 strcpy(comm, get_saved_cmdlines(map));
50d88758
TG
1600 else
1601 strcpy(comm, "<...>");
4c27e756
SRRH
1602}
1603
1604void trace_find_cmdline(int pid, char comm[])
1605{
1606 preempt_disable();
1607 arch_spin_lock(&trace_cmdline_lock);
1608
1609 __trace_find_cmdline(pid, comm);
bc0c38d1 1610
0199c4e6 1611 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1612 preempt_enable();
bc0c38d1
SR
1613}
1614
e309b41d 1615void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1616{
0fb9656d 1617 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1618 return;
1619
7ffbd48d
SR
1620 if (!__this_cpu_read(trace_cmdline_save))
1621 return;
1622
379cfdac
SRRH
1623 if (trace_save_cmdline(tsk))
1624 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
1625}
1626
45dcd8b8 1627void
38697053
SR
1628tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1629 int pc)
bc0c38d1
SR
1630{
1631 struct task_struct *tsk = current;
bc0c38d1 1632
777e208d
SR
1633 entry->preempt_count = pc & 0xff;
1634 entry->pid = (tsk) ? tsk->pid : 0;
1635 entry->flags =
9244489a 1636#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 1637 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
1638#else
1639 TRACE_FLAG_IRQS_NOSUPPORT |
1640#endif
bc0c38d1
SR
1641 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1642 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
1643 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1644 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 1645}
f413cdb8 1646EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 1647
e77405ad
SR
1648struct ring_buffer_event *
1649trace_buffer_lock_reserve(struct ring_buffer *buffer,
1650 int type,
1651 unsigned long len,
1652 unsigned long flags, int pc)
51a763dd
ACM
1653{
1654 struct ring_buffer_event *event;
1655
e77405ad 1656 event = ring_buffer_lock_reserve(buffer, len);
51a763dd
ACM
1657 if (event != NULL) {
1658 struct trace_entry *ent = ring_buffer_event_data(event);
1659
1660 tracing_generic_entry_update(ent, flags, pc);
1661 ent->type = type;
1662 }
1663
1664 return event;
1665}
51a763dd 1666
7ffbd48d
SR
1667void
1668__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1669{
1670 __this_cpu_write(trace_cmdline_save, true);
1671 ring_buffer_unlock_commit(buffer, event);
1672}
1673
e77405ad
SR
1674static inline void
1675__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1676 struct ring_buffer_event *event,
0d5c6e1c 1677 unsigned long flags, int pc)
51a763dd 1678{
7ffbd48d 1679 __buffer_unlock_commit(buffer, event);
51a763dd 1680
e77405ad
SR
1681 ftrace_trace_stack(buffer, flags, 6, pc);
1682 ftrace_trace_userstack(buffer, flags, pc);
07edf712
FW
1683}
1684
e77405ad
SR
1685void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1686 struct ring_buffer_event *event,
1687 unsigned long flags, int pc)
07edf712 1688{
0d5c6e1c 1689 __trace_buffer_unlock_commit(buffer, event, flags, pc);
51a763dd 1690}
0d5c6e1c 1691EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
51a763dd 1692
2c4a33ab
SRRH
1693static struct ring_buffer *temp_buffer;
1694
ccb469a1
SR
1695struct ring_buffer_event *
1696trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1697 struct ftrace_event_file *ftrace_file,
1698 int type, unsigned long len,
1699 unsigned long flags, int pc)
1700{
2c4a33ab
SRRH
1701 struct ring_buffer_event *entry;
1702
12883efb 1703 *current_rb = ftrace_file->tr->trace_buffer.buffer;
2c4a33ab 1704 entry = trace_buffer_lock_reserve(*current_rb,
ccb469a1 1705 type, len, flags, pc);
2c4a33ab
SRRH
1706 /*
1707 * If tracing is off, but we have triggers enabled
1708 * we still need to look at the event data. Use the temp_buffer
1709 * to store the trace event for the tigger to use. It's recusive
1710 * safe and will not be recorded anywhere.
1711 */
1712 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1713 *current_rb = temp_buffer;
1714 entry = trace_buffer_lock_reserve(*current_rb,
1715 type, len, flags, pc);
1716 }
1717 return entry;
ccb469a1
SR
1718}
1719EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1720
ef5580d0 1721struct ring_buffer_event *
e77405ad
SR
1722trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1723 int type, unsigned long len,
ef5580d0
SR
1724 unsigned long flags, int pc)
1725{
12883efb 1726 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1727 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1728 type, len, flags, pc);
1729}
94487d6d 1730EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1731
e77405ad
SR
1732void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1733 struct ring_buffer_event *event,
ef5580d0
SR
1734 unsigned long flags, int pc)
1735{
0d5c6e1c 1736 __trace_buffer_unlock_commit(buffer, event, flags, pc);
07edf712 1737}
94487d6d 1738EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
07edf712 1739
0d5c6e1c
SR
1740void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1741 struct ring_buffer_event *event,
1742 unsigned long flags, int pc,
1743 struct pt_regs *regs)
1fd8df2c 1744{
7ffbd48d 1745 __buffer_unlock_commit(buffer, event);
1fd8df2c
MH
1746
1747 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1748 ftrace_trace_userstack(buffer, flags, pc);
1749}
0d5c6e1c 1750EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1751
e77405ad
SR
1752void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1753 struct ring_buffer_event *event)
77d9f465 1754{
e77405ad 1755 ring_buffer_discard_commit(buffer, event);
ef5580d0 1756}
12acd473 1757EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
ef5580d0 1758
e309b41d 1759void
7be42151 1760trace_function(struct trace_array *tr,
38697053
SR
1761 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1762 int pc)
bc0c38d1 1763{
e1112b4d 1764 struct ftrace_event_call *call = &event_function;
12883efb 1765 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 1766 struct ring_buffer_event *event;
777e208d 1767 struct ftrace_entry *entry;
bc0c38d1 1768
d769041f 1769 /* If we are reading the ring buffer, don't trace */
dd17c8f7 1770 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
d769041f
SR
1771 return;
1772
e77405ad 1773 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
51a763dd 1774 flags, pc);
3928a8a2
SR
1775 if (!event)
1776 return;
1777 entry = ring_buffer_event_data(event);
777e208d
SR
1778 entry->ip = ip;
1779 entry->parent_ip = parent_ip;
e1112b4d 1780
f306cc82 1781 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1782 __buffer_unlock_commit(buffer, event);
bc0c38d1
SR
1783}
1784
c0a0d0d3 1785#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1786
1787#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1788struct ftrace_stack {
1789 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1790};
1791
1792static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1793static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1794
e77405ad 1795static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1796 unsigned long flags,
1fd8df2c 1797 int skip, int pc, struct pt_regs *regs)
86387f7e 1798{
e1112b4d 1799 struct ftrace_event_call *call = &event_kernel_stack;
3928a8a2 1800 struct ring_buffer_event *event;
777e208d 1801 struct stack_entry *entry;
86387f7e 1802 struct stack_trace trace;
4a9bd3f1
SR
1803 int use_stack;
1804 int size = FTRACE_STACK_ENTRIES;
1805
1806 trace.nr_entries = 0;
1807 trace.skip = skip;
1808
1809 /*
1810 * Since events can happen in NMIs there's no safe way to
1811 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1812 * or NMI comes in, it will just have to use the default
1813 * FTRACE_STACK_SIZE.
1814 */
1815 preempt_disable_notrace();
1816
82146529 1817 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1818 /*
1819 * We don't need any atomic variables, just a barrier.
1820 * If an interrupt comes in, we don't care, because it would
1821 * have exited and put the counter back to what we want.
1822 * We just need a barrier to keep gcc from moving things
1823 * around.
1824 */
1825 barrier();
1826 if (use_stack == 1) {
bdffd893 1827 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1828 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1829
1830 if (regs)
1831 save_stack_trace_regs(regs, &trace);
1832 else
1833 save_stack_trace(&trace);
1834
1835 if (trace.nr_entries > size)
1836 size = trace.nr_entries;
1837 } else
1838 /* From now on, use_stack is a boolean */
1839 use_stack = 0;
1840
1841 size *= sizeof(unsigned long);
86387f7e 1842
e77405ad 1843 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1844 sizeof(*entry) + size, flags, pc);
3928a8a2 1845 if (!event)
4a9bd3f1
SR
1846 goto out;
1847 entry = ring_buffer_event_data(event);
86387f7e 1848
4a9bd3f1
SR
1849 memset(&entry->caller, 0, size);
1850
1851 if (use_stack)
1852 memcpy(&entry->caller, trace.entries,
1853 trace.nr_entries * sizeof(unsigned long));
1854 else {
1855 trace.max_entries = FTRACE_STACK_ENTRIES;
1856 trace.entries = entry->caller;
1857 if (regs)
1858 save_stack_trace_regs(regs, &trace);
1859 else
1860 save_stack_trace(&trace);
1861 }
1862
1863 entry->size = trace.nr_entries;
86387f7e 1864
f306cc82 1865 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1866 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1867
1868 out:
1869 /* Again, don't let gcc optimize things here */
1870 barrier();
82146529 1871 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1872 preempt_enable_notrace();
1873
f0a920d5
IM
1874}
1875
1fd8df2c
MH
1876void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1877 int skip, int pc, struct pt_regs *regs)
1878{
1879 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1880 return;
1881
1882 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1883}
1884
e77405ad
SR
1885void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1886 int skip, int pc)
53614991
SR
1887{
1888 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1889 return;
1890
1fd8df2c 1891 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
53614991
SR
1892}
1893
c0a0d0d3
FW
1894void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1895 int pc)
38697053 1896{
12883efb 1897 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1898}
1899
03889384
SR
1900/**
1901 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1902 * @skip: Number of functions to skip (helper handlers)
03889384 1903 */
c142be8e 1904void trace_dump_stack(int skip)
03889384
SR
1905{
1906 unsigned long flags;
1907
1908 if (tracing_disabled || tracing_selftest_running)
e36c5458 1909 return;
03889384
SR
1910
1911 local_save_flags(flags);
1912
c142be8e
SRRH
1913 /*
1914 * Skip 3 more, seems to get us at the caller of
1915 * this function.
1916 */
1917 skip += 3;
1918 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1919 flags, skip, preempt_count(), NULL);
03889384
SR
1920}
1921
91e86e56
SR
1922static DEFINE_PER_CPU(int, user_stack_count);
1923
e77405ad
SR
1924void
1925ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1926{
e1112b4d 1927 struct ftrace_event_call *call = &event_user_stack;
8d7c6a96 1928 struct ring_buffer_event *event;
02b67518
TE
1929 struct userstack_entry *entry;
1930 struct stack_trace trace;
02b67518
TE
1931
1932 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1933 return;
1934
b6345879
SR
1935 /*
1936 * NMIs can not handle page faults, even with fix ups.
1937 * The save user stack can (and often does) fault.
1938 */
1939 if (unlikely(in_nmi()))
1940 return;
02b67518 1941
91e86e56
SR
1942 /*
1943 * prevent recursion, since the user stack tracing may
1944 * trigger other kernel events.
1945 */
1946 preempt_disable();
1947 if (__this_cpu_read(user_stack_count))
1948 goto out;
1949
1950 __this_cpu_inc(user_stack_count);
1951
e77405ad 1952 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1953 sizeof(*entry), flags, pc);
02b67518 1954 if (!event)
1dbd1951 1955 goto out_drop_count;
02b67518 1956 entry = ring_buffer_event_data(event);
02b67518 1957
48659d31 1958 entry->tgid = current->tgid;
02b67518
TE
1959 memset(&entry->caller, 0, sizeof(entry->caller));
1960
1961 trace.nr_entries = 0;
1962 trace.max_entries = FTRACE_STACK_ENTRIES;
1963 trace.skip = 0;
1964 trace.entries = entry->caller;
1965
1966 save_stack_trace_user(&trace);
f306cc82 1967 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1968 __buffer_unlock_commit(buffer, event);
91e86e56 1969
1dbd1951 1970 out_drop_count:
91e86e56 1971 __this_cpu_dec(user_stack_count);
91e86e56
SR
1972 out:
1973 preempt_enable();
02b67518
TE
1974}
1975
4fd27358
HE
1976#ifdef UNUSED
1977static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1978{
7be42151 1979 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1980}
4fd27358 1981#endif /* UNUSED */
02b67518 1982
c0a0d0d3
FW
1983#endif /* CONFIG_STACKTRACE */
1984
07d777fe
SR
1985/* created for use with alloc_percpu */
1986struct trace_buffer_struct {
1987 char buffer[TRACE_BUF_SIZE];
1988};
1989
1990static struct trace_buffer_struct *trace_percpu_buffer;
1991static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1992static struct trace_buffer_struct *trace_percpu_irq_buffer;
1993static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1994
1995/*
1996 * The buffer used is dependent on the context. There is a per cpu
1997 * buffer for normal context, softirq contex, hard irq context and
1998 * for NMI context. Thise allows for lockless recording.
1999 *
2000 * Note, if the buffers failed to be allocated, then this returns NULL
2001 */
2002static char *get_trace_buf(void)
2003{
2004 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
2005
2006 /*
2007 * If we have allocated per cpu buffers, then we do not
2008 * need to do any locking.
2009 */
2010 if (in_nmi())
2011 percpu_buffer = trace_percpu_nmi_buffer;
2012 else if (in_irq())
2013 percpu_buffer = trace_percpu_irq_buffer;
2014 else if (in_softirq())
2015 percpu_buffer = trace_percpu_sirq_buffer;
2016 else
2017 percpu_buffer = trace_percpu_buffer;
2018
2019 if (!percpu_buffer)
2020 return NULL;
2021
d8a0349c 2022 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2023}
2024
2025static int alloc_percpu_trace_buffer(void)
2026{
2027 struct trace_buffer_struct *buffers;
2028 struct trace_buffer_struct *sirq_buffers;
2029 struct trace_buffer_struct *irq_buffers;
2030 struct trace_buffer_struct *nmi_buffers;
2031
2032 buffers = alloc_percpu(struct trace_buffer_struct);
2033 if (!buffers)
2034 goto err_warn;
2035
2036 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2037 if (!sirq_buffers)
2038 goto err_sirq;
2039
2040 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2041 if (!irq_buffers)
2042 goto err_irq;
2043
2044 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2045 if (!nmi_buffers)
2046 goto err_nmi;
2047
2048 trace_percpu_buffer = buffers;
2049 trace_percpu_sirq_buffer = sirq_buffers;
2050 trace_percpu_irq_buffer = irq_buffers;
2051 trace_percpu_nmi_buffer = nmi_buffers;
2052
2053 return 0;
2054
2055 err_nmi:
2056 free_percpu(irq_buffers);
2057 err_irq:
2058 free_percpu(sirq_buffers);
2059 err_sirq:
2060 free_percpu(buffers);
2061 err_warn:
2062 WARN(1, "Could not allocate percpu trace_printk buffer");
2063 return -ENOMEM;
2064}
2065
81698831
SR
2066static int buffers_allocated;
2067
07d777fe
SR
2068void trace_printk_init_buffers(void)
2069{
07d777fe
SR
2070 if (buffers_allocated)
2071 return;
2072
2073 if (alloc_percpu_trace_buffer())
2074 return;
2075
2184db46
SR
2076 /* trace_printk() is for debug use only. Don't use it in production. */
2077
69a1c994
BP
2078 pr_warning("\n");
2079 pr_warning("**********************************************************\n");
2184db46
SR
2080 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2081 pr_warning("** **\n");
2082 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2083 pr_warning("** **\n");
2084 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2085 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2086 pr_warning("** **\n");
2087 pr_warning("** If you see this message and you are not debugging **\n");
2088 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2089 pr_warning("** **\n");
2090 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2091 pr_warning("**********************************************************\n");
07d777fe 2092
b382ede6
SR
2093 /* Expand the buffers to set size */
2094 tracing_update_buffers();
2095
07d777fe 2096 buffers_allocated = 1;
81698831
SR
2097
2098 /*
2099 * trace_printk_init_buffers() can be called by modules.
2100 * If that happens, then we need to start cmdline recording
2101 * directly here. If the global_trace.buffer is already
2102 * allocated here, then this was called by module code.
2103 */
12883efb 2104 if (global_trace.trace_buffer.buffer)
81698831
SR
2105 tracing_start_cmdline_record();
2106}
2107
2108void trace_printk_start_comm(void)
2109{
2110 /* Start tracing comms if trace printk is set */
2111 if (!buffers_allocated)
2112 return;
2113 tracing_start_cmdline_record();
2114}
2115
2116static void trace_printk_start_stop_comm(int enabled)
2117{
2118 if (!buffers_allocated)
2119 return;
2120
2121 if (enabled)
2122 tracing_start_cmdline_record();
2123 else
2124 tracing_stop_cmdline_record();
07d777fe
SR
2125}
2126
769b0441 2127/**
48ead020 2128 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2129 *
2130 */
40ce74f1 2131int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2132{
e1112b4d 2133 struct ftrace_event_call *call = &event_bprint;
769b0441 2134 struct ring_buffer_event *event;
e77405ad 2135 struct ring_buffer *buffer;
769b0441 2136 struct trace_array *tr = &global_trace;
48ead020 2137 struct bprint_entry *entry;
769b0441 2138 unsigned long flags;
07d777fe
SR
2139 char *tbuffer;
2140 int len = 0, size, pc;
769b0441
FW
2141
2142 if (unlikely(tracing_selftest_running || tracing_disabled))
2143 return 0;
2144
2145 /* Don't pollute graph traces with trace_vprintk internals */
2146 pause_graph_tracing();
2147
2148 pc = preempt_count();
5168ae50 2149 preempt_disable_notrace();
769b0441 2150
07d777fe
SR
2151 tbuffer = get_trace_buf();
2152 if (!tbuffer) {
2153 len = 0;
769b0441 2154 goto out;
07d777fe 2155 }
769b0441 2156
07d777fe 2157 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2158
07d777fe
SR
2159 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2160 goto out;
769b0441 2161
07d777fe 2162 local_save_flags(flags);
769b0441 2163 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2164 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2165 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2166 flags, pc);
769b0441 2167 if (!event)
07d777fe 2168 goto out;
769b0441
FW
2169 entry = ring_buffer_event_data(event);
2170 entry->ip = ip;
769b0441
FW
2171 entry->fmt = fmt;
2172
07d777fe 2173 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2174 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2175 __buffer_unlock_commit(buffer, event);
d931369b
SR
2176 ftrace_trace_stack(buffer, flags, 6, pc);
2177 }
769b0441 2178
769b0441 2179out:
5168ae50 2180 preempt_enable_notrace();
769b0441
FW
2181 unpause_graph_tracing();
2182
2183 return len;
2184}
48ead020
FW
2185EXPORT_SYMBOL_GPL(trace_vbprintk);
2186
12883efb
SRRH
2187static int
2188__trace_array_vprintk(struct ring_buffer *buffer,
2189 unsigned long ip, const char *fmt, va_list args)
48ead020 2190{
e1112b4d 2191 struct ftrace_event_call *call = &event_print;
48ead020 2192 struct ring_buffer_event *event;
07d777fe 2193 int len = 0, size, pc;
48ead020 2194 struct print_entry *entry;
07d777fe
SR
2195 unsigned long flags;
2196 char *tbuffer;
48ead020
FW
2197
2198 if (tracing_disabled || tracing_selftest_running)
2199 return 0;
2200
07d777fe
SR
2201 /* Don't pollute graph traces with trace_vprintk internals */
2202 pause_graph_tracing();
2203
48ead020
FW
2204 pc = preempt_count();
2205 preempt_disable_notrace();
48ead020 2206
07d777fe
SR
2207
2208 tbuffer = get_trace_buf();
2209 if (!tbuffer) {
2210 len = 0;
48ead020 2211 goto out;
07d777fe 2212 }
48ead020 2213
3558a5ac 2214 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2215
07d777fe 2216 local_save_flags(flags);
48ead020 2217 size = sizeof(*entry) + len + 1;
e77405ad 2218 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2219 flags, pc);
48ead020 2220 if (!event)
07d777fe 2221 goto out;
48ead020 2222 entry = ring_buffer_event_data(event);
c13d2f7c 2223 entry->ip = ip;
48ead020 2224
3558a5ac 2225 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2226 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2227 __buffer_unlock_commit(buffer, event);
07d777fe 2228 ftrace_trace_stack(buffer, flags, 6, pc);
d931369b 2229 }
48ead020
FW
2230 out:
2231 preempt_enable_notrace();
07d777fe 2232 unpause_graph_tracing();
48ead020
FW
2233
2234 return len;
2235}
659372d3 2236
12883efb
SRRH
2237int trace_array_vprintk(struct trace_array *tr,
2238 unsigned long ip, const char *fmt, va_list args)
2239{
2240 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2241}
2242
2243int trace_array_printk(struct trace_array *tr,
2244 unsigned long ip, const char *fmt, ...)
2245{
2246 int ret;
2247 va_list ap;
2248
2249 if (!(trace_flags & TRACE_ITER_PRINTK))
2250 return 0;
2251
2252 va_start(ap, fmt);
2253 ret = trace_array_vprintk(tr, ip, fmt, ap);
2254 va_end(ap);
2255 return ret;
2256}
2257
2258int trace_array_printk_buf(struct ring_buffer *buffer,
2259 unsigned long ip, const char *fmt, ...)
2260{
2261 int ret;
2262 va_list ap;
2263
2264 if (!(trace_flags & TRACE_ITER_PRINTK))
2265 return 0;
2266
2267 va_start(ap, fmt);
2268 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2269 va_end(ap);
2270 return ret;
2271}
2272
659372d3
SR
2273int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2274{
a813a159 2275 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2276}
769b0441
FW
2277EXPORT_SYMBOL_GPL(trace_vprintk);
2278
e2ac8ef5 2279static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2280{
6d158a81
SR
2281 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2282
5a90f577 2283 iter->idx++;
6d158a81
SR
2284 if (buf_iter)
2285 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2286}
2287
e309b41d 2288static struct trace_entry *
bc21b478
SR
2289peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2290 unsigned long *lost_events)
dd0e545f 2291{
3928a8a2 2292 struct ring_buffer_event *event;
6d158a81 2293 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2294
d769041f
SR
2295 if (buf_iter)
2296 event = ring_buffer_iter_peek(buf_iter, ts);
2297 else
12883efb 2298 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2299 lost_events);
d769041f 2300
4a9bd3f1
SR
2301 if (event) {
2302 iter->ent_size = ring_buffer_event_length(event);
2303 return ring_buffer_event_data(event);
2304 }
2305 iter->ent_size = 0;
2306 return NULL;
dd0e545f 2307}
d769041f 2308
dd0e545f 2309static struct trace_entry *
bc21b478
SR
2310__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2311 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2312{
12883efb 2313 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2314 struct trace_entry *ent, *next = NULL;
aa27497c 2315 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2316 int cpu_file = iter->cpu_file;
3928a8a2 2317 u64 next_ts = 0, ts;
bc0c38d1 2318 int next_cpu = -1;
12b5da34 2319 int next_size = 0;
bc0c38d1
SR
2320 int cpu;
2321
b04cc6b1
FW
2322 /*
2323 * If we are in a per_cpu trace file, don't bother by iterating over
2324 * all cpu and peek directly.
2325 */
ae3b5093 2326 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2327 if (ring_buffer_empty_cpu(buffer, cpu_file))
2328 return NULL;
bc21b478 2329 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2330 if (ent_cpu)
2331 *ent_cpu = cpu_file;
2332
2333 return ent;
2334 }
2335
ab46428c 2336 for_each_tracing_cpu(cpu) {
dd0e545f 2337
3928a8a2
SR
2338 if (ring_buffer_empty_cpu(buffer, cpu))
2339 continue;
dd0e545f 2340
bc21b478 2341 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2342
cdd31cd2
IM
2343 /*
2344 * Pick the entry with the smallest timestamp:
2345 */
3928a8a2 2346 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2347 next = ent;
2348 next_cpu = cpu;
3928a8a2 2349 next_ts = ts;
bc21b478 2350 next_lost = lost_events;
12b5da34 2351 next_size = iter->ent_size;
bc0c38d1
SR
2352 }
2353 }
2354
12b5da34
SR
2355 iter->ent_size = next_size;
2356
bc0c38d1
SR
2357 if (ent_cpu)
2358 *ent_cpu = next_cpu;
2359
3928a8a2
SR
2360 if (ent_ts)
2361 *ent_ts = next_ts;
2362
bc21b478
SR
2363 if (missing_events)
2364 *missing_events = next_lost;
2365
bc0c38d1
SR
2366 return next;
2367}
2368
dd0e545f 2369/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2370struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2371 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2372{
bc21b478 2373 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2374}
2375
2376/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2377void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2378{
bc21b478
SR
2379 iter->ent = __find_next_entry(iter, &iter->cpu,
2380 &iter->lost_events, &iter->ts);
dd0e545f 2381
3928a8a2 2382 if (iter->ent)
e2ac8ef5 2383 trace_iterator_increment(iter);
dd0e545f 2384
3928a8a2 2385 return iter->ent ? iter : NULL;
b3806b43 2386}
bc0c38d1 2387
e309b41d 2388static void trace_consume(struct trace_iterator *iter)
b3806b43 2389{
12883efb 2390 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2391 &iter->lost_events);
bc0c38d1
SR
2392}
2393
e309b41d 2394static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2395{
2396 struct trace_iterator *iter = m->private;
bc0c38d1 2397 int i = (int)*pos;
4e3c3333 2398 void *ent;
bc0c38d1 2399
a63ce5b3
SR
2400 WARN_ON_ONCE(iter->leftover);
2401
bc0c38d1
SR
2402 (*pos)++;
2403
2404 /* can't go backwards */
2405 if (iter->idx > i)
2406 return NULL;
2407
2408 if (iter->idx < 0)
955b61e5 2409 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2410 else
2411 ent = iter;
2412
2413 while (ent && iter->idx < i)
955b61e5 2414 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2415
2416 iter->pos = *pos;
2417
bc0c38d1
SR
2418 return ent;
2419}
2420
955b61e5 2421void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2422{
2f26ebd5
SR
2423 struct ring_buffer_event *event;
2424 struct ring_buffer_iter *buf_iter;
2425 unsigned long entries = 0;
2426 u64 ts;
2427
12883efb 2428 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2429
6d158a81
SR
2430 buf_iter = trace_buffer_iter(iter, cpu);
2431 if (!buf_iter)
2f26ebd5
SR
2432 return;
2433
2f26ebd5
SR
2434 ring_buffer_iter_reset(buf_iter);
2435
2436 /*
2437 * We could have the case with the max latency tracers
2438 * that a reset never took place on a cpu. This is evident
2439 * by the timestamp being before the start of the buffer.
2440 */
2441 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2442 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2443 break;
2444 entries++;
2445 ring_buffer_read(buf_iter, NULL);
2446 }
2447
12883efb 2448 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2449}
2450
d7350c3f 2451/*
d7350c3f
FW
2452 * The current tracer is copied to avoid a global locking
2453 * all around.
2454 */
bc0c38d1
SR
2455static void *s_start(struct seq_file *m, loff_t *pos)
2456{
2457 struct trace_iterator *iter = m->private;
2b6080f2 2458 struct trace_array *tr = iter->tr;
b04cc6b1 2459 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2460 void *p = NULL;
2461 loff_t l = 0;
3928a8a2 2462 int cpu;
bc0c38d1 2463
2fd196ec
HT
2464 /*
2465 * copy the tracer to avoid using a global lock all around.
2466 * iter->trace is a copy of current_trace, the pointer to the
2467 * name may be used instead of a strcmp(), as iter->trace->name
2468 * will point to the same string as current_trace->name.
2469 */
bc0c38d1 2470 mutex_lock(&trace_types_lock);
2b6080f2
SR
2471 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2472 *iter->trace = *tr->current_trace;
d7350c3f 2473 mutex_unlock(&trace_types_lock);
bc0c38d1 2474
12883efb 2475#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2476 if (iter->snapshot && iter->trace->use_max_tr)
2477 return ERR_PTR(-EBUSY);
12883efb 2478#endif
debdd57f
HT
2479
2480 if (!iter->snapshot)
2481 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2482
bc0c38d1
SR
2483 if (*pos != iter->pos) {
2484 iter->ent = NULL;
2485 iter->cpu = 0;
2486 iter->idx = -1;
2487
ae3b5093 2488 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2489 for_each_tracing_cpu(cpu)
2f26ebd5 2490 tracing_iter_reset(iter, cpu);
b04cc6b1 2491 } else
2f26ebd5 2492 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2493
ac91d854 2494 iter->leftover = 0;
bc0c38d1
SR
2495 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2496 ;
2497
2498 } else {
a63ce5b3
SR
2499 /*
2500 * If we overflowed the seq_file before, then we want
2501 * to just reuse the trace_seq buffer again.
2502 */
2503 if (iter->leftover)
2504 p = iter;
2505 else {
2506 l = *pos - 1;
2507 p = s_next(m, p, &l);
2508 }
bc0c38d1
SR
2509 }
2510
4f535968 2511 trace_event_read_lock();
7e53bd42 2512 trace_access_lock(cpu_file);
bc0c38d1
SR
2513 return p;
2514}
2515
2516static void s_stop(struct seq_file *m, void *p)
2517{
7e53bd42
LJ
2518 struct trace_iterator *iter = m->private;
2519
12883efb 2520#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2521 if (iter->snapshot && iter->trace->use_max_tr)
2522 return;
12883efb 2523#endif
debdd57f
HT
2524
2525 if (!iter->snapshot)
2526 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2527
7e53bd42 2528 trace_access_unlock(iter->cpu_file);
4f535968 2529 trace_event_read_unlock();
bc0c38d1
SR
2530}
2531
39eaf7ef 2532static void
12883efb
SRRH
2533get_total_entries(struct trace_buffer *buf,
2534 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2535{
2536 unsigned long count;
2537 int cpu;
2538
2539 *total = 0;
2540 *entries = 0;
2541
2542 for_each_tracing_cpu(cpu) {
12883efb 2543 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2544 /*
2545 * If this buffer has skipped entries, then we hold all
2546 * entries for the trace and we need to ignore the
2547 * ones before the time stamp.
2548 */
12883efb
SRRH
2549 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2550 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2551 /* total is the same as the entries */
2552 *total += count;
2553 } else
2554 *total += count +
12883efb 2555 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2556 *entries += count;
2557 }
2558}
2559
e309b41d 2560static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2561{
d79ac28f
RV
2562 seq_puts(m, "# _------=> CPU# \n"
2563 "# / _-----=> irqs-off \n"
2564 "# | / _----=> need-resched \n"
2565 "# || / _---=> hardirq/softirq \n"
2566 "# ||| / _--=> preempt-depth \n"
2567 "# |||| / delay \n"
2568 "# cmd pid ||||| time | caller \n"
2569 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2570}
2571
12883efb 2572static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2573{
39eaf7ef
SR
2574 unsigned long total;
2575 unsigned long entries;
2576
12883efb 2577 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2578 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2579 entries, total, num_online_cpus());
2580 seq_puts(m, "#\n");
2581}
2582
12883efb 2583static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2584{
12883efb 2585 print_event_info(buf, m);
d79ac28f
RV
2586 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2587 "# | | | | |\n");
bc0c38d1
SR
2588}
2589
12883efb 2590static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2591{
12883efb 2592 print_event_info(buf, m);
d79ac28f
RV
2593 seq_puts(m, "# _-----=> irqs-off\n"
2594 "# / _----=> need-resched\n"
2595 "# | / _---=> hardirq/softirq\n"
2596 "# || / _--=> preempt-depth\n"
2597 "# ||| / delay\n"
2598 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2599 "# | | | |||| | |\n");
77271ce4 2600}
bc0c38d1 2601
62b915f1 2602void
bc0c38d1
SR
2603print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2604{
2605 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2606 struct trace_buffer *buf = iter->trace_buffer;
2607 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2608 struct tracer *type = iter->trace;
39eaf7ef
SR
2609 unsigned long entries;
2610 unsigned long total;
bc0c38d1
SR
2611 const char *name = "preemption";
2612
d840f718 2613 name = type->name;
bc0c38d1 2614
12883efb 2615 get_total_entries(buf, &total, &entries);
bc0c38d1 2616
888b55dc 2617 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2618 name, UTS_RELEASE);
888b55dc 2619 seq_puts(m, "# -----------------------------------"
bc0c38d1 2620 "---------------------------------\n");
888b55dc 2621 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2622 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2623 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2624 entries,
4c11d7ae 2625 total,
12883efb 2626 buf->cpu,
bc0c38d1
SR
2627#if defined(CONFIG_PREEMPT_NONE)
2628 "server",
2629#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2630 "desktop",
b5c21b45 2631#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2632 "preempt",
2633#else
2634 "unknown",
2635#endif
2636 /* These are reserved for later use */
2637 0, 0, 0, 0);
2638#ifdef CONFIG_SMP
2639 seq_printf(m, " #P:%d)\n", num_online_cpus());
2640#else
2641 seq_puts(m, ")\n");
2642#endif
888b55dc
KM
2643 seq_puts(m, "# -----------------\n");
2644 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2645 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2646 data->comm, data->pid,
2647 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2648 data->policy, data->rt_priority);
888b55dc 2649 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2650
2651 if (data->critical_start) {
888b55dc 2652 seq_puts(m, "# => started at: ");
214023c3
SR
2653 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2654 trace_print_seq(m, &iter->seq);
888b55dc 2655 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2656 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2657 trace_print_seq(m, &iter->seq);
8248ac05 2658 seq_puts(m, "\n#\n");
bc0c38d1
SR
2659 }
2660
888b55dc 2661 seq_puts(m, "#\n");
bc0c38d1
SR
2662}
2663
a309720c
SR
2664static void test_cpu_buff_start(struct trace_iterator *iter)
2665{
2666 struct trace_seq *s = &iter->seq;
2667
12ef7d44
SR
2668 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2669 return;
2670
2671 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2672 return;
2673
4462344e 2674 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2675 return;
2676
12883efb 2677 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2678 return;
2679
4462344e 2680 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2681
2682 /* Don't print started cpu buffer for the first entry of the trace */
2683 if (iter->idx > 1)
2684 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2685 iter->cpu);
a309720c
SR
2686}
2687
2c4f035f 2688static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2689{
214023c3 2690 struct trace_seq *s = &iter->seq;
bc0c38d1 2691 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2692 struct trace_entry *entry;
f633cef0 2693 struct trace_event *event;
bc0c38d1 2694
4e3c3333 2695 entry = iter->ent;
dd0e545f 2696
a309720c
SR
2697 test_cpu_buff_start(iter);
2698
c4a8e8be 2699 event = ftrace_find_event(entry->type);
bc0c38d1 2700
c4a8e8be 2701 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2702 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2703 trace_print_lat_context(iter);
2704 else
2705 trace_print_context(iter);
c4a8e8be 2706 }
bc0c38d1 2707
19a7fe20
SRRH
2708 if (trace_seq_has_overflowed(s))
2709 return TRACE_TYPE_PARTIAL_LINE;
2710
268ccda0 2711 if (event)
a9a57763 2712 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2713
19a7fe20 2714 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2715
19a7fe20 2716 return trace_handle_return(s);
bc0c38d1
SR
2717}
2718
2c4f035f 2719static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2720{
2721 struct trace_seq *s = &iter->seq;
2722 struct trace_entry *entry;
f633cef0 2723 struct trace_event *event;
f9896bf3
IM
2724
2725 entry = iter->ent;
dd0e545f 2726
19a7fe20
SRRH
2727 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2728 trace_seq_printf(s, "%d %d %llu ",
2729 entry->pid, iter->cpu, iter->ts);
2730
2731 if (trace_seq_has_overflowed(s))
2732 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2733
f633cef0 2734 event = ftrace_find_event(entry->type);
268ccda0 2735 if (event)
a9a57763 2736 return event->funcs->raw(iter, 0, event);
d9793bd8 2737
19a7fe20 2738 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2739
19a7fe20 2740 return trace_handle_return(s);
f9896bf3
IM
2741}
2742
2c4f035f 2743static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2744{
2745 struct trace_seq *s = &iter->seq;
2746 unsigned char newline = '\n';
2747 struct trace_entry *entry;
f633cef0 2748 struct trace_event *event;
5e3ca0ec
IM
2749
2750 entry = iter->ent;
dd0e545f 2751
c4a8e8be 2752 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2753 SEQ_PUT_HEX_FIELD(s, entry->pid);
2754 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2755 SEQ_PUT_HEX_FIELD(s, iter->ts);
2756 if (trace_seq_has_overflowed(s))
2757 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2758 }
5e3ca0ec 2759
f633cef0 2760 event = ftrace_find_event(entry->type);
268ccda0 2761 if (event) {
a9a57763 2762 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2763 if (ret != TRACE_TYPE_HANDLED)
2764 return ret;
2765 }
7104f300 2766
19a7fe20 2767 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2768
19a7fe20 2769 return trace_handle_return(s);
5e3ca0ec
IM
2770}
2771
2c4f035f 2772static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2773{
2774 struct trace_seq *s = &iter->seq;
2775 struct trace_entry *entry;
f633cef0 2776 struct trace_event *event;
cb0f12aa
IM
2777
2778 entry = iter->ent;
dd0e545f 2779
c4a8e8be 2780 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2781 SEQ_PUT_FIELD(s, entry->pid);
2782 SEQ_PUT_FIELD(s, iter->cpu);
2783 SEQ_PUT_FIELD(s, iter->ts);
2784 if (trace_seq_has_overflowed(s))
2785 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2786 }
cb0f12aa 2787
f633cef0 2788 event = ftrace_find_event(entry->type);
a9a57763
SR
2789 return event ? event->funcs->binary(iter, 0, event) :
2790 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2791}
2792
62b915f1 2793int trace_empty(struct trace_iterator *iter)
bc0c38d1 2794{
6d158a81 2795 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2796 int cpu;
2797
9aba60fe 2798 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2799 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2800 cpu = iter->cpu_file;
6d158a81
SR
2801 buf_iter = trace_buffer_iter(iter, cpu);
2802 if (buf_iter) {
2803 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2804 return 0;
2805 } else {
12883efb 2806 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2807 return 0;
2808 }
2809 return 1;
2810 }
2811
ab46428c 2812 for_each_tracing_cpu(cpu) {
6d158a81
SR
2813 buf_iter = trace_buffer_iter(iter, cpu);
2814 if (buf_iter) {
2815 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2816 return 0;
2817 } else {
12883efb 2818 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2819 return 0;
2820 }
bc0c38d1 2821 }
d769041f 2822
797d3712 2823 return 1;
bc0c38d1
SR
2824}
2825
4f535968 2826/* Called with trace_event_read_lock() held. */
955b61e5 2827enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2828{
2c4f035f
FW
2829 enum print_line_t ret;
2830
19a7fe20
SRRH
2831 if (iter->lost_events) {
2832 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2833 iter->cpu, iter->lost_events);
2834 if (trace_seq_has_overflowed(&iter->seq))
2835 return TRACE_TYPE_PARTIAL_LINE;
2836 }
bc21b478 2837
2c4f035f
FW
2838 if (iter->trace && iter->trace->print_line) {
2839 ret = iter->trace->print_line(iter);
2840 if (ret != TRACE_TYPE_UNHANDLED)
2841 return ret;
2842 }
72829bc3 2843
09ae7234
SRRH
2844 if (iter->ent->type == TRACE_BPUTS &&
2845 trace_flags & TRACE_ITER_PRINTK &&
2846 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2847 return trace_print_bputs_msg_only(iter);
2848
48ead020
FW
2849 if (iter->ent->type == TRACE_BPRINT &&
2850 trace_flags & TRACE_ITER_PRINTK &&
2851 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2852 return trace_print_bprintk_msg_only(iter);
48ead020 2853
66896a85
FW
2854 if (iter->ent->type == TRACE_PRINT &&
2855 trace_flags & TRACE_ITER_PRINTK &&
2856 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2857 return trace_print_printk_msg_only(iter);
66896a85 2858
cb0f12aa
IM
2859 if (trace_flags & TRACE_ITER_BIN)
2860 return print_bin_fmt(iter);
2861
5e3ca0ec
IM
2862 if (trace_flags & TRACE_ITER_HEX)
2863 return print_hex_fmt(iter);
2864
f9896bf3
IM
2865 if (trace_flags & TRACE_ITER_RAW)
2866 return print_raw_fmt(iter);
2867
f9896bf3
IM
2868 return print_trace_fmt(iter);
2869}
2870
7e9a49ef
JO
2871void trace_latency_header(struct seq_file *m)
2872{
2873 struct trace_iterator *iter = m->private;
2874
2875 /* print nothing if the buffers are empty */
2876 if (trace_empty(iter))
2877 return;
2878
2879 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2880 print_trace_header(m, iter);
2881
2882 if (!(trace_flags & TRACE_ITER_VERBOSE))
2883 print_lat_help_header(m);
2884}
2885
62b915f1
JO
2886void trace_default_header(struct seq_file *m)
2887{
2888 struct trace_iterator *iter = m->private;
2889
f56e7f8e
JO
2890 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2891 return;
2892
62b915f1
JO
2893 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2894 /* print nothing if the buffers are empty */
2895 if (trace_empty(iter))
2896 return;
2897 print_trace_header(m, iter);
2898 if (!(trace_flags & TRACE_ITER_VERBOSE))
2899 print_lat_help_header(m);
2900 } else {
77271ce4
SR
2901 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2902 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2903 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2904 else
12883efb 2905 print_func_help_header(iter->trace_buffer, m);
77271ce4 2906 }
62b915f1
JO
2907 }
2908}
2909
e0a413f6
SR
2910static void test_ftrace_alive(struct seq_file *m)
2911{
2912 if (!ftrace_is_dead())
2913 return;
d79ac28f
RV
2914 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2915 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2916}
2917
d8741e2e 2918#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2919static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2920{
d79ac28f
RV
2921 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2922 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2923 "# Takes a snapshot of the main buffer.\n"
2924 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2925 "# (Doesn't have to be '2' works with any number that\n"
2926 "# is not a '0' or '1')\n");
d8741e2e 2927}
f1affcaa
SRRH
2928
2929static void show_snapshot_percpu_help(struct seq_file *m)
2930{
fa6f0cc7 2931 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2932#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2933 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2934 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2935#else
d79ac28f
RV
2936 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2937 "# Must use main snapshot file to allocate.\n");
f1affcaa 2938#endif
d79ac28f
RV
2939 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2940 "# (Doesn't have to be '2' works with any number that\n"
2941 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2942}
2943
d8741e2e
SRRH
2944static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2945{
45ad21ca 2946 if (iter->tr->allocated_snapshot)
fa6f0cc7 2947 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2948 else
fa6f0cc7 2949 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2950
fa6f0cc7 2951 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2952 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2953 show_snapshot_main_help(m);
2954 else
2955 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2956}
2957#else
2958/* Should never be called */
2959static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2960#endif
2961
bc0c38d1
SR
2962static int s_show(struct seq_file *m, void *v)
2963{
2964 struct trace_iterator *iter = v;
a63ce5b3 2965 int ret;
bc0c38d1
SR
2966
2967 if (iter->ent == NULL) {
2968 if (iter->tr) {
2969 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2970 seq_puts(m, "#\n");
e0a413f6 2971 test_ftrace_alive(m);
bc0c38d1 2972 }
d8741e2e
SRRH
2973 if (iter->snapshot && trace_empty(iter))
2974 print_snapshot_help(m, iter);
2975 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2976 iter->trace->print_header(m);
62b915f1
JO
2977 else
2978 trace_default_header(m);
2979
a63ce5b3
SR
2980 } else if (iter->leftover) {
2981 /*
2982 * If we filled the seq_file buffer earlier, we
2983 * want to just show it now.
2984 */
2985 ret = trace_print_seq(m, &iter->seq);
2986
2987 /* ret should this time be zero, but you never know */
2988 iter->leftover = ret;
2989
bc0c38d1 2990 } else {
f9896bf3 2991 print_trace_line(iter);
a63ce5b3
SR
2992 ret = trace_print_seq(m, &iter->seq);
2993 /*
2994 * If we overflow the seq_file buffer, then it will
2995 * ask us for this data again at start up.
2996 * Use that instead.
2997 * ret is 0 if seq_file write succeeded.
2998 * -1 otherwise.
2999 */
3000 iter->leftover = ret;
bc0c38d1
SR
3001 }
3002
3003 return 0;
3004}
3005
649e9c70
ON
3006/*
3007 * Should be used after trace_array_get(), trace_types_lock
3008 * ensures that i_cdev was already initialized.
3009 */
3010static inline int tracing_get_cpu(struct inode *inode)
3011{
3012 if (inode->i_cdev) /* See trace_create_cpu_file() */
3013 return (long)inode->i_cdev - 1;
3014 return RING_BUFFER_ALL_CPUS;
3015}
3016
88e9d34c 3017static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3018 .start = s_start,
3019 .next = s_next,
3020 .stop = s_stop,
3021 .show = s_show,
bc0c38d1
SR
3022};
3023
e309b41d 3024static struct trace_iterator *
6484c71c 3025__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3026{
6484c71c 3027 struct trace_array *tr = inode->i_private;
bc0c38d1 3028 struct trace_iterator *iter;
50e18b94 3029 int cpu;
bc0c38d1 3030
85a2f9b4
SR
3031 if (tracing_disabled)
3032 return ERR_PTR(-ENODEV);
60a11774 3033
50e18b94 3034 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3035 if (!iter)
3036 return ERR_PTR(-ENOMEM);
bc0c38d1 3037
6d158a81
SR
3038 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3039 GFP_KERNEL);
93574fcc
DC
3040 if (!iter->buffer_iter)
3041 goto release;
3042
d7350c3f
FW
3043 /*
3044 * We make a copy of the current tracer to avoid concurrent
3045 * changes on it while we are reading.
3046 */
bc0c38d1 3047 mutex_lock(&trace_types_lock);
d7350c3f 3048 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3049 if (!iter->trace)
d7350c3f 3050 goto fail;
85a2f9b4 3051
2b6080f2 3052 *iter->trace = *tr->current_trace;
d7350c3f 3053
79f55997 3054 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3055 goto fail;
3056
12883efb
SRRH
3057 iter->tr = tr;
3058
3059#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3060 /* Currently only the top directory has a snapshot */
3061 if (tr->current_trace->print_max || snapshot)
12883efb 3062 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3063 else
12883efb
SRRH
3064#endif
3065 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3066 iter->snapshot = snapshot;
bc0c38d1 3067 iter->pos = -1;
6484c71c 3068 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3069 mutex_init(&iter->mutex);
bc0c38d1 3070
8bba1bf5
MM
3071 /* Notify the tracer early; before we stop tracing. */
3072 if (iter->trace && iter->trace->open)
a93751ca 3073 iter->trace->open(iter);
8bba1bf5 3074
12ef7d44 3075 /* Annotate start of buffers if we had overruns */
12883efb 3076 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3077 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3078
8be0709f 3079 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3080 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3081 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3082
debdd57f
HT
3083 /* stop the trace while dumping if we are not opening "snapshot" */
3084 if (!iter->snapshot)
2b6080f2 3085 tracing_stop_tr(tr);
2f26ebd5 3086
ae3b5093 3087 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3088 for_each_tracing_cpu(cpu) {
b04cc6b1 3089 iter->buffer_iter[cpu] =
12883efb 3090 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3091 }
3092 ring_buffer_read_prepare_sync();
3093 for_each_tracing_cpu(cpu) {
3094 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3095 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3096 }
3097 } else {
3098 cpu = iter->cpu_file;
3928a8a2 3099 iter->buffer_iter[cpu] =
12883efb 3100 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3101 ring_buffer_read_prepare_sync();
3102 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3103 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3104 }
3105
bc0c38d1
SR
3106 mutex_unlock(&trace_types_lock);
3107
bc0c38d1 3108 return iter;
3928a8a2 3109
d7350c3f 3110 fail:
3928a8a2 3111 mutex_unlock(&trace_types_lock);
d7350c3f 3112 kfree(iter->trace);
6d158a81 3113 kfree(iter->buffer_iter);
93574fcc 3114release:
50e18b94
JO
3115 seq_release_private(inode, file);
3116 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3117}
3118
3119int tracing_open_generic(struct inode *inode, struct file *filp)
3120{
60a11774
SR
3121 if (tracing_disabled)
3122 return -ENODEV;
3123
bc0c38d1
SR
3124 filp->private_data = inode->i_private;
3125 return 0;
3126}
3127
2e86421d
GB
3128bool tracing_is_disabled(void)
3129{
3130 return (tracing_disabled) ? true: false;
3131}
3132
7b85af63
SRRH
3133/*
3134 * Open and update trace_array ref count.
3135 * Must have the current trace_array passed to it.
3136 */
dcc30223 3137static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3138{
3139 struct trace_array *tr = inode->i_private;
3140
3141 if (tracing_disabled)
3142 return -ENODEV;
3143
3144 if (trace_array_get(tr) < 0)
3145 return -ENODEV;
3146
3147 filp->private_data = inode->i_private;
3148
3149 return 0;
7b85af63
SRRH
3150}
3151
4fd27358 3152static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3153{
6484c71c 3154 struct trace_array *tr = inode->i_private;
907f2784 3155 struct seq_file *m = file->private_data;
4acd4d00 3156 struct trace_iterator *iter;
3928a8a2 3157 int cpu;
bc0c38d1 3158
ff451961 3159 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3160 trace_array_put(tr);
4acd4d00 3161 return 0;
ff451961 3162 }
4acd4d00 3163
6484c71c 3164 /* Writes do not use seq_file */
4acd4d00 3165 iter = m->private;
bc0c38d1 3166 mutex_lock(&trace_types_lock);
a695cb58 3167
3928a8a2
SR
3168 for_each_tracing_cpu(cpu) {
3169 if (iter->buffer_iter[cpu])
3170 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3171 }
3172
bc0c38d1
SR
3173 if (iter->trace && iter->trace->close)
3174 iter->trace->close(iter);
3175
debdd57f
HT
3176 if (!iter->snapshot)
3177 /* reenable tracing if it was previously enabled */
2b6080f2 3178 tracing_start_tr(tr);
f77d09a3
AL
3179
3180 __trace_array_put(tr);
3181
bc0c38d1
SR
3182 mutex_unlock(&trace_types_lock);
3183
d7350c3f 3184 mutex_destroy(&iter->mutex);
b0dfa978 3185 free_cpumask_var(iter->started);
d7350c3f 3186 kfree(iter->trace);
6d158a81 3187 kfree(iter->buffer_iter);
50e18b94 3188 seq_release_private(inode, file);
ff451961 3189
bc0c38d1
SR
3190 return 0;
3191}
3192
7b85af63
SRRH
3193static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3194{
3195 struct trace_array *tr = inode->i_private;
3196
3197 trace_array_put(tr);
bc0c38d1
SR
3198 return 0;
3199}
3200
7b85af63
SRRH
3201static int tracing_single_release_tr(struct inode *inode, struct file *file)
3202{
3203 struct trace_array *tr = inode->i_private;
3204
3205 trace_array_put(tr);
3206
3207 return single_release(inode, file);
3208}
3209
bc0c38d1
SR
3210static int tracing_open(struct inode *inode, struct file *file)
3211{
6484c71c 3212 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3213 struct trace_iterator *iter;
3214 int ret = 0;
bc0c38d1 3215
ff451961
SRRH
3216 if (trace_array_get(tr) < 0)
3217 return -ENODEV;
3218
4acd4d00 3219 /* If this file was open for write, then erase contents */
6484c71c
ON
3220 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3221 int cpu = tracing_get_cpu(inode);
3222
3223 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3224 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3225 else
6484c71c 3226 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3227 }
bc0c38d1 3228
4acd4d00 3229 if (file->f_mode & FMODE_READ) {
6484c71c 3230 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3231 if (IS_ERR(iter))
3232 ret = PTR_ERR(iter);
3233 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3234 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3235 }
ff451961
SRRH
3236
3237 if (ret < 0)
3238 trace_array_put(tr);
3239
bc0c38d1
SR
3240 return ret;
3241}
3242
607e2ea1
SRRH
3243/*
3244 * Some tracers are not suitable for instance buffers.
3245 * A tracer is always available for the global array (toplevel)
3246 * or if it explicitly states that it is.
3247 */
3248static bool
3249trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3250{
3251 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3252}
3253
3254/* Find the next tracer that this trace array may use */
3255static struct tracer *
3256get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3257{
3258 while (t && !trace_ok_for_array(t, tr))
3259 t = t->next;
3260
3261 return t;
3262}
3263
e309b41d 3264static void *
bc0c38d1
SR
3265t_next(struct seq_file *m, void *v, loff_t *pos)
3266{
607e2ea1 3267 struct trace_array *tr = m->private;
f129e965 3268 struct tracer *t = v;
bc0c38d1
SR
3269
3270 (*pos)++;
3271
3272 if (t)
607e2ea1 3273 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3274
bc0c38d1
SR
3275 return t;
3276}
3277
3278static void *t_start(struct seq_file *m, loff_t *pos)
3279{
607e2ea1 3280 struct trace_array *tr = m->private;
f129e965 3281 struct tracer *t;
bc0c38d1
SR
3282 loff_t l = 0;
3283
3284 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3285
3286 t = get_tracer_for_array(tr, trace_types);
3287 for (; t && l < *pos; t = t_next(m, t, &l))
3288 ;
bc0c38d1
SR
3289
3290 return t;
3291}
3292
3293static void t_stop(struct seq_file *m, void *p)
3294{
3295 mutex_unlock(&trace_types_lock);
3296}
3297
3298static int t_show(struct seq_file *m, void *v)
3299{
3300 struct tracer *t = v;
3301
3302 if (!t)
3303 return 0;
3304
fa6f0cc7 3305 seq_puts(m, t->name);
bc0c38d1
SR
3306 if (t->next)
3307 seq_putc(m, ' ');
3308 else
3309 seq_putc(m, '\n');
3310
3311 return 0;
3312}
3313
88e9d34c 3314static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3315 .start = t_start,
3316 .next = t_next,
3317 .stop = t_stop,
3318 .show = t_show,
bc0c38d1
SR
3319};
3320
3321static int show_traces_open(struct inode *inode, struct file *file)
3322{
607e2ea1
SRRH
3323 struct trace_array *tr = inode->i_private;
3324 struct seq_file *m;
3325 int ret;
3326
60a11774
SR
3327 if (tracing_disabled)
3328 return -ENODEV;
3329
607e2ea1
SRRH
3330 ret = seq_open(file, &show_traces_seq_ops);
3331 if (ret)
3332 return ret;
3333
3334 m = file->private_data;
3335 m->private = tr;
3336
3337 return 0;
bc0c38d1
SR
3338}
3339
4acd4d00
SR
3340static ssize_t
3341tracing_write_stub(struct file *filp, const char __user *ubuf,
3342 size_t count, loff_t *ppos)
3343{
3344 return count;
3345}
3346
098c879e 3347loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3348{
098c879e
SRRH
3349 int ret;
3350
364829b1 3351 if (file->f_mode & FMODE_READ)
098c879e 3352 ret = seq_lseek(file, offset, whence);
364829b1 3353 else
098c879e
SRRH
3354 file->f_pos = ret = 0;
3355
3356 return ret;
364829b1
SP
3357}
3358
5e2336a0 3359static const struct file_operations tracing_fops = {
4bf39a94
IM
3360 .open = tracing_open,
3361 .read = seq_read,
4acd4d00 3362 .write = tracing_write_stub,
098c879e 3363 .llseek = tracing_lseek,
4bf39a94 3364 .release = tracing_release,
bc0c38d1
SR
3365};
3366
5e2336a0 3367static const struct file_operations show_traces_fops = {
c7078de1
IM
3368 .open = show_traces_open,
3369 .read = seq_read,
3370 .release = seq_release,
b444786f 3371 .llseek = seq_lseek,
c7078de1
IM
3372};
3373
36dfe925
IM
3374/*
3375 * The tracer itself will not take this lock, but still we want
3376 * to provide a consistent cpumask to user-space:
3377 */
3378static DEFINE_MUTEX(tracing_cpumask_update_lock);
3379
3380/*
3381 * Temporary storage for the character representation of the
3382 * CPU bitmask (and one more byte for the newline):
3383 */
3384static char mask_str[NR_CPUS + 1];
3385
c7078de1
IM
3386static ssize_t
3387tracing_cpumask_read(struct file *filp, char __user *ubuf,
3388 size_t count, loff_t *ppos)
3389{
ccfe9e42 3390 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3391 int len;
c7078de1
IM
3392
3393 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3394
1a40243b
TH
3395 len = snprintf(mask_str, count, "%*pb\n",
3396 cpumask_pr_args(tr->tracing_cpumask));
3397 if (len >= count) {
36dfe925
IM
3398 count = -EINVAL;
3399 goto out_err;
3400 }
36dfe925
IM
3401 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3402
3403out_err:
c7078de1
IM
3404 mutex_unlock(&tracing_cpumask_update_lock);
3405
3406 return count;
3407}
3408
3409static ssize_t
3410tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3411 size_t count, loff_t *ppos)
3412{
ccfe9e42 3413 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3414 cpumask_var_t tracing_cpumask_new;
2b6080f2 3415 int err, cpu;
9e01c1b7
RR
3416
3417 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3418 return -ENOMEM;
c7078de1 3419
9e01c1b7 3420 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3421 if (err)
36dfe925
IM
3422 goto err_unlock;
3423
215368e8
LZ
3424 mutex_lock(&tracing_cpumask_update_lock);
3425
a5e25883 3426 local_irq_disable();
0b9b12c1 3427 arch_spin_lock(&tr->max_lock);
ab46428c 3428 for_each_tracing_cpu(cpu) {
36dfe925
IM
3429 /*
3430 * Increase/decrease the disabled counter if we are
3431 * about to flip a bit in the cpumask:
3432 */
ccfe9e42 3433 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3434 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3435 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3436 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3437 }
ccfe9e42 3438 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3439 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3440 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3441 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3442 }
3443 }
0b9b12c1 3444 arch_spin_unlock(&tr->max_lock);
a5e25883 3445 local_irq_enable();
36dfe925 3446
ccfe9e42 3447 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3448
3449 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3450 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3451
3452 return count;
36dfe925
IM
3453
3454err_unlock:
215368e8 3455 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3456
3457 return err;
c7078de1
IM
3458}
3459
5e2336a0 3460static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3461 .open = tracing_open_generic_tr,
c7078de1
IM
3462 .read = tracing_cpumask_read,
3463 .write = tracing_cpumask_write,
ccfe9e42 3464 .release = tracing_release_generic_tr,
b444786f 3465 .llseek = generic_file_llseek,
bc0c38d1
SR
3466};
3467
fdb372ed 3468static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3469{
d8e83d26 3470 struct tracer_opt *trace_opts;
2b6080f2 3471 struct trace_array *tr = m->private;
d8e83d26 3472 u32 tracer_flags;
d8e83d26 3473 int i;
adf9f195 3474
d8e83d26 3475 mutex_lock(&trace_types_lock);
2b6080f2
SR
3476 tracer_flags = tr->current_trace->flags->val;
3477 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3478
bc0c38d1
SR
3479 for (i = 0; trace_options[i]; i++) {
3480 if (trace_flags & (1 << i))
fdb372ed 3481 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3482 else
fdb372ed 3483 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3484 }
3485
adf9f195
FW
3486 for (i = 0; trace_opts[i].name; i++) {
3487 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3488 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3489 else
fdb372ed 3490 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3491 }
d8e83d26 3492 mutex_unlock(&trace_types_lock);
adf9f195 3493
fdb372ed 3494 return 0;
bc0c38d1 3495}
bc0c38d1 3496
8c1a49ae 3497static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3498 struct tracer_flags *tracer_flags,
3499 struct tracer_opt *opts, int neg)
3500{
8c1a49ae 3501 struct tracer *trace = tr->current_trace;
8d18eaaf 3502 int ret;
bc0c38d1 3503
8c1a49ae 3504 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3505 if (ret)
3506 return ret;
3507
3508 if (neg)
3509 tracer_flags->val &= ~opts->bit;
3510 else
3511 tracer_flags->val |= opts->bit;
3512 return 0;
bc0c38d1
SR
3513}
3514
adf9f195 3515/* Try to assign a tracer specific option */
8c1a49ae 3516static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3517{
8c1a49ae 3518 struct tracer *trace = tr->current_trace;
7770841e 3519 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3520 struct tracer_opt *opts = NULL;
8d18eaaf 3521 int i;
adf9f195 3522
7770841e
Z
3523 for (i = 0; tracer_flags->opts[i].name; i++) {
3524 opts = &tracer_flags->opts[i];
adf9f195 3525
8d18eaaf 3526 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3527 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3528 }
adf9f195 3529
8d18eaaf 3530 return -EINVAL;
adf9f195
FW
3531}
3532
613f04a0
SRRH
3533/* Some tracers require overwrite to stay enabled */
3534int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3535{
3536 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3537 return -1;
3538
3539 return 0;
3540}
3541
2b6080f2 3542int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3543{
3544 /* do nothing if flag is already set */
3545 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3546 return 0;
3547
3548 /* Give the tracer a chance to approve the change */
2b6080f2 3549 if (tr->current_trace->flag_changed)
bf6065b5 3550 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3551 return -EINVAL;
af4617bd
SR
3552
3553 if (enabled)
3554 trace_flags |= mask;
3555 else
3556 trace_flags &= ~mask;
e870e9a1
LZ
3557
3558 if (mask == TRACE_ITER_RECORD_CMD)
3559 trace_event_enable_cmd_record(enabled);
750912fa 3560
80902822 3561 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3562 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3563#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3564 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3565#endif
3566 }
81698831
SR
3567
3568 if (mask == TRACE_ITER_PRINTK)
3569 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3570
3571 return 0;
af4617bd
SR
3572}
3573
2b6080f2 3574static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3575{
8d18eaaf 3576 char *cmp;
bc0c38d1 3577 int neg = 0;
613f04a0 3578 int ret = -ENODEV;
bc0c38d1
SR
3579 int i;
3580
7bcfaf54 3581 cmp = strstrip(option);
bc0c38d1 3582
8d18eaaf 3583 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3584 neg = 1;
3585 cmp += 2;
3586 }
3587
69d34da2
SRRH
3588 mutex_lock(&trace_types_lock);
3589
bc0c38d1 3590 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3591 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3592 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3593 break;
3594 }
3595 }
adf9f195
FW
3596
3597 /* If no option could be set, test the specific tracer options */
69d34da2 3598 if (!trace_options[i])
8c1a49ae 3599 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3600
3601 mutex_unlock(&trace_types_lock);
bc0c38d1 3602
7bcfaf54
SR
3603 return ret;
3604}
3605
3606static ssize_t
3607tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3608 size_t cnt, loff_t *ppos)
3609{
2b6080f2
SR
3610 struct seq_file *m = filp->private_data;
3611 struct trace_array *tr = m->private;
7bcfaf54 3612 char buf[64];
613f04a0 3613 int ret;
7bcfaf54
SR
3614
3615 if (cnt >= sizeof(buf))
3616 return -EINVAL;
3617
3618 if (copy_from_user(&buf, ubuf, cnt))
3619 return -EFAULT;
3620
a8dd2176
SR
3621 buf[cnt] = 0;
3622
2b6080f2 3623 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3624 if (ret < 0)
3625 return ret;
7bcfaf54 3626
cf8517cf 3627 *ppos += cnt;
bc0c38d1
SR
3628
3629 return cnt;
3630}
3631
fdb372ed
LZ
3632static int tracing_trace_options_open(struct inode *inode, struct file *file)
3633{
7b85af63 3634 struct trace_array *tr = inode->i_private;
f77d09a3 3635 int ret;
7b85af63 3636
fdb372ed
LZ
3637 if (tracing_disabled)
3638 return -ENODEV;
2b6080f2 3639
7b85af63
SRRH
3640 if (trace_array_get(tr) < 0)
3641 return -ENODEV;
3642
f77d09a3
AL
3643 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3644 if (ret < 0)
3645 trace_array_put(tr);
3646
3647 return ret;
fdb372ed
LZ
3648}
3649
5e2336a0 3650static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3651 .open = tracing_trace_options_open,
3652 .read = seq_read,
3653 .llseek = seq_lseek,
7b85af63 3654 .release = tracing_single_release_tr,
ee6bce52 3655 .write = tracing_trace_options_write,
bc0c38d1
SR
3656};
3657
7bd2f24c
IM
3658static const char readme_msg[] =
3659 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3660 "# echo 0 > tracing_on : quick way to disable tracing\n"
3661 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3662 " Important files:\n"
3663 " trace\t\t\t- The static contents of the buffer\n"
3664 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3665 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3666 " current_tracer\t- function and latency tracers\n"
3667 " available_tracers\t- list of configured tracers for current_tracer\n"
3668 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3669 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3670 " trace_clock\t\t-change the clock used to order events\n"
3671 " local: Per cpu clock but may not be synced across CPUs\n"
3672 " global: Synced across CPUs but slows tracing down.\n"
3673 " counter: Not a clock, but just an increment\n"
3674 " uptime: Jiffy counter from time of boot\n"
3675 " perf: Same clock that perf events use\n"
3676#ifdef CONFIG_X86_64
3677 " x86-tsc: TSC cycle counter\n"
3678#endif
3679 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3680 " tracing_cpumask\t- Limit which CPUs to trace\n"
3681 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3682 "\t\t\t Remove sub-buffer with rmdir\n"
3683 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3684 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3685 "\t\t\t option name\n"
939c7a4f 3686 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3687#ifdef CONFIG_DYNAMIC_FTRACE
3688 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3689 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3690 "\t\t\t functions\n"
3691 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3692 "\t modules: Can select a group via module\n"
3693 "\t Format: :mod:<module-name>\n"
3694 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3695 "\t triggers: a command to perform when function is hit\n"
3696 "\t Format: <function>:<trigger>[:count]\n"
3697 "\t trigger: traceon, traceoff\n"
3698 "\t\t enable_event:<system>:<event>\n"
3699 "\t\t disable_event:<system>:<event>\n"
22f45649 3700#ifdef CONFIG_STACKTRACE
71485c45 3701 "\t\t stacktrace\n"
22f45649
SRRH
3702#endif
3703#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3704 "\t\t snapshot\n"
22f45649 3705#endif
17a280ea
SRRH
3706 "\t\t dump\n"
3707 "\t\t cpudump\n"
71485c45
SRRH
3708 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3709 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3710 "\t The first one will disable tracing every time do_fault is hit\n"
3711 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3712 "\t The first time do trap is hit and it disables tracing, the\n"
3713 "\t counter will decrement to 2. If tracing is already disabled,\n"
3714 "\t the counter will not decrement. It only decrements when the\n"
3715 "\t trigger did work\n"
3716 "\t To remove trigger without count:\n"
3717 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3718 "\t To remove trigger with a count:\n"
3719 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3720 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3721 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3722 "\t modules: Can select a group via module command :mod:\n"
3723 "\t Does not accept triggers\n"
22f45649
SRRH
3724#endif /* CONFIG_DYNAMIC_FTRACE */
3725#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3726 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3727 "\t\t (function)\n"
22f45649
SRRH
3728#endif
3729#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3730 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3731 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3732 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3733#endif
3734#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3735 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3736 "\t\t\t snapshot buffer. Read the contents for more\n"
3737 "\t\t\t information\n"
22f45649 3738#endif
991821c8 3739#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3740 " stack_trace\t\t- Shows the max stack trace when active\n"
3741 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3742 "\t\t\t Write into this file to reset the max size (trigger a\n"
3743 "\t\t\t new trace)\n"
22f45649 3744#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3745 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3746 "\t\t\t traces\n"
22f45649 3747#endif
991821c8 3748#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3749 " events/\t\t- Directory containing all trace event subsystems:\n"
3750 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3751 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3752 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3753 "\t\t\t events\n"
26f25564 3754 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3755 " events/<system>/<event>/\t- Directory containing control files for\n"
3756 "\t\t\t <event>:\n"
26f25564
TZ
3757 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3758 " filter\t\t- If set, only events passing filter are traced\n"
3759 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3760 "\t Format: <trigger>[:count][if <filter>]\n"
3761 "\t trigger: traceon, traceoff\n"
3762 "\t enable_event:<system>:<event>\n"
3763 "\t disable_event:<system>:<event>\n"
26f25564 3764#ifdef CONFIG_STACKTRACE
71485c45 3765 "\t\t stacktrace\n"
26f25564
TZ
3766#endif
3767#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3768 "\t\t snapshot\n"
26f25564 3769#endif
71485c45
SRRH
3770 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3771 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3772 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3773 "\t events/block/block_unplug/trigger\n"
3774 "\t The first disables tracing every time block_unplug is hit.\n"
3775 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3776 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3777 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3778 "\t Like function triggers, the counter is only decremented if it\n"
3779 "\t enabled or disabled tracing.\n"
3780 "\t To remove a trigger without a count:\n"
3781 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3782 "\t To remove a trigger with a count:\n"
3783 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3784 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3785;
3786
3787static ssize_t
3788tracing_readme_read(struct file *filp, char __user *ubuf,
3789 size_t cnt, loff_t *ppos)
3790{
3791 return simple_read_from_buffer(ubuf, cnt, ppos,
3792 readme_msg, strlen(readme_msg));
3793}
3794
5e2336a0 3795static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3796 .open = tracing_open_generic,
3797 .read = tracing_readme_read,
b444786f 3798 .llseek = generic_file_llseek,
7bd2f24c
IM
3799};
3800
42584c81
YY
3801static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3802{
3803 unsigned int *ptr = v;
69abe6a5 3804
42584c81
YY
3805 if (*pos || m->count)
3806 ptr++;
69abe6a5 3807
42584c81 3808 (*pos)++;
69abe6a5 3809
939c7a4f
YY
3810 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3811 ptr++) {
42584c81
YY
3812 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3813 continue;
69abe6a5 3814
42584c81
YY
3815 return ptr;
3816 }
69abe6a5 3817
42584c81
YY
3818 return NULL;
3819}
3820
3821static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3822{
3823 void *v;
3824 loff_t l = 0;
69abe6a5 3825
4c27e756
SRRH
3826 preempt_disable();
3827 arch_spin_lock(&trace_cmdline_lock);
3828
939c7a4f 3829 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3830 while (l <= *pos) {
3831 v = saved_cmdlines_next(m, v, &l);
3832 if (!v)
3833 return NULL;
69abe6a5
AP
3834 }
3835
42584c81
YY
3836 return v;
3837}
3838
3839static void saved_cmdlines_stop(struct seq_file *m, void *v)
3840{
4c27e756
SRRH
3841 arch_spin_unlock(&trace_cmdline_lock);
3842 preempt_enable();
42584c81 3843}
69abe6a5 3844
42584c81
YY
3845static int saved_cmdlines_show(struct seq_file *m, void *v)
3846{
3847 char buf[TASK_COMM_LEN];
3848 unsigned int *pid = v;
69abe6a5 3849
4c27e756 3850 __trace_find_cmdline(*pid, buf);
42584c81
YY
3851 seq_printf(m, "%d %s\n", *pid, buf);
3852 return 0;
3853}
3854
3855static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3856 .start = saved_cmdlines_start,
3857 .next = saved_cmdlines_next,
3858 .stop = saved_cmdlines_stop,
3859 .show = saved_cmdlines_show,
3860};
3861
3862static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3863{
3864 if (tracing_disabled)
3865 return -ENODEV;
3866
3867 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3868}
3869
3870static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3871 .open = tracing_saved_cmdlines_open,
3872 .read = seq_read,
3873 .llseek = seq_lseek,
3874 .release = seq_release,
69abe6a5
AP
3875};
3876
939c7a4f
YY
3877static ssize_t
3878tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3879 size_t cnt, loff_t *ppos)
3880{
3881 char buf[64];
3882 int r;
3883
3884 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3885 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3886 arch_spin_unlock(&trace_cmdline_lock);
3887
3888 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3889}
3890
3891static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3892{
3893 kfree(s->saved_cmdlines);
3894 kfree(s->map_cmdline_to_pid);
3895 kfree(s);
3896}
3897
3898static int tracing_resize_saved_cmdlines(unsigned int val)
3899{
3900 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3901
a6af8fbf 3902 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3903 if (!s)
3904 return -ENOMEM;
3905
3906 if (allocate_cmdlines_buffer(val, s) < 0) {
3907 kfree(s);
3908 return -ENOMEM;
3909 }
3910
3911 arch_spin_lock(&trace_cmdline_lock);
3912 savedcmd_temp = savedcmd;
3913 savedcmd = s;
3914 arch_spin_unlock(&trace_cmdline_lock);
3915 free_saved_cmdlines_buffer(savedcmd_temp);
3916
3917 return 0;
3918}
3919
3920static ssize_t
3921tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3922 size_t cnt, loff_t *ppos)
3923{
3924 unsigned long val;
3925 int ret;
3926
3927 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3928 if (ret)
3929 return ret;
3930
3931 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3932 if (!val || val > PID_MAX_DEFAULT)
3933 return -EINVAL;
3934
3935 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3936 if (ret < 0)
3937 return ret;
3938
3939 *ppos += cnt;
3940
3941 return cnt;
3942}
3943
3944static const struct file_operations tracing_saved_cmdlines_size_fops = {
3945 .open = tracing_open_generic,
3946 .read = tracing_saved_cmdlines_size_read,
3947 .write = tracing_saved_cmdlines_size_write,
3948};
3949
9828413d
SRRH
3950#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3951static union trace_enum_map_item *
3952update_enum_map(union trace_enum_map_item *ptr)
3953{
3954 if (!ptr->map.enum_string) {
3955 if (ptr->tail.next) {
3956 ptr = ptr->tail.next;
3957 /* Set ptr to the next real item (skip head) */
3958 ptr++;
3959 } else
3960 return NULL;
3961 }
3962 return ptr;
3963}
3964
3965static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3966{
3967 union trace_enum_map_item *ptr = v;
3968
3969 /*
3970 * Paranoid! If ptr points to end, we don't want to increment past it.
3971 * This really should never happen.
3972 */
3973 ptr = update_enum_map(ptr);
3974 if (WARN_ON_ONCE(!ptr))
3975 return NULL;
3976
3977 ptr++;
3978
3979 (*pos)++;
3980
3981 ptr = update_enum_map(ptr);
3982
3983 return ptr;
3984}
3985
3986static void *enum_map_start(struct seq_file *m, loff_t *pos)
3987{
3988 union trace_enum_map_item *v;
3989 loff_t l = 0;
3990
3991 mutex_lock(&trace_enum_mutex);
3992
3993 v = trace_enum_maps;
3994 if (v)
3995 v++;
3996
3997 while (v && l < *pos) {
3998 v = enum_map_next(m, v, &l);
3999 }
4000
4001 return v;
4002}
4003
4004static void enum_map_stop(struct seq_file *m, void *v)
4005{
4006 mutex_unlock(&trace_enum_mutex);
4007}
4008
4009static int enum_map_show(struct seq_file *m, void *v)
4010{
4011 union trace_enum_map_item *ptr = v;
4012
4013 seq_printf(m, "%s %ld (%s)\n",
4014 ptr->map.enum_string, ptr->map.enum_value,
4015 ptr->map.system);
4016
4017 return 0;
4018}
4019
4020static const struct seq_operations tracing_enum_map_seq_ops = {
4021 .start = enum_map_start,
4022 .next = enum_map_next,
4023 .stop = enum_map_stop,
4024 .show = enum_map_show,
4025};
4026
4027static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4028{
4029 if (tracing_disabled)
4030 return -ENODEV;
4031
4032 return seq_open(filp, &tracing_enum_map_seq_ops);
4033}
4034
4035static const struct file_operations tracing_enum_map_fops = {
4036 .open = tracing_enum_map_open,
4037 .read = seq_read,
4038 .llseek = seq_lseek,
4039 .release = seq_release,
4040};
4041
4042static inline union trace_enum_map_item *
4043trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4044{
4045 /* Return tail of array given the head */
4046 return ptr + ptr->head.length + 1;
4047}
4048
4049static void
4050trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4051 int len)
4052{
4053 struct trace_enum_map **stop;
4054 struct trace_enum_map **map;
4055 union trace_enum_map_item *map_array;
4056 union trace_enum_map_item *ptr;
4057
4058 stop = start + len;
4059
4060 /*
4061 * The trace_enum_maps contains the map plus a head and tail item,
4062 * where the head holds the module and length of array, and the
4063 * tail holds a pointer to the next list.
4064 */
4065 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4066 if (!map_array) {
4067 pr_warning("Unable to allocate trace enum mapping\n");
4068 return;
4069 }
4070
4071 mutex_lock(&trace_enum_mutex);
4072
4073 if (!trace_enum_maps)
4074 trace_enum_maps = map_array;
4075 else {
4076 ptr = trace_enum_maps;
4077 for (;;) {
4078 ptr = trace_enum_jmp_to_tail(ptr);
4079 if (!ptr->tail.next)
4080 break;
4081 ptr = ptr->tail.next;
4082
4083 }
4084 ptr->tail.next = map_array;
4085 }
4086 map_array->head.mod = mod;
4087 map_array->head.length = len;
4088 map_array++;
4089
4090 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4091 map_array->map = **map;
4092 map_array++;
4093 }
4094 memset(map_array, 0, sizeof(*map_array));
4095
4096 mutex_unlock(&trace_enum_mutex);
4097}
4098
4099static void trace_create_enum_file(struct dentry *d_tracer)
4100{
4101 trace_create_file("enum_map", 0444, d_tracer,
4102 NULL, &tracing_enum_map_fops);
4103}
4104
4105#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4106static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4107static inline void trace_insert_enum_map_file(struct module *mod,
4108 struct trace_enum_map **start, int len) { }
4109#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4110
4111static void trace_insert_enum_map(struct module *mod,
4112 struct trace_enum_map **start, int len)
0c564a53
SRRH
4113{
4114 struct trace_enum_map **map;
0c564a53
SRRH
4115
4116 if (len <= 0)
4117 return;
4118
4119 map = start;
4120
4121 trace_event_enum_update(map, len);
9828413d
SRRH
4122
4123 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4124}
4125
bc0c38d1
SR
4126static ssize_t
4127tracing_set_trace_read(struct file *filp, char __user *ubuf,
4128 size_t cnt, loff_t *ppos)
4129{
2b6080f2 4130 struct trace_array *tr = filp->private_data;
ee6c2c1b 4131 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4132 int r;
4133
4134 mutex_lock(&trace_types_lock);
2b6080f2 4135 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4136 mutex_unlock(&trace_types_lock);
4137
4bf39a94 4138 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4139}
4140
b6f11df2
ACM
4141int tracer_init(struct tracer *t, struct trace_array *tr)
4142{
12883efb 4143 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4144 return t->init(tr);
4145}
4146
12883efb 4147static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4148{
4149 int cpu;
737223fb 4150
438ced17 4151 for_each_tracing_cpu(cpu)
12883efb 4152 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4153}
4154
12883efb 4155#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4156/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4157static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4158 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4159{
4160 int cpu, ret = 0;
4161
4162 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4163 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4164 ret = ring_buffer_resize(trace_buf->buffer,
4165 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4166 if (ret < 0)
4167 break;
12883efb
SRRH
4168 per_cpu_ptr(trace_buf->data, cpu)->entries =
4169 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4170 }
4171 } else {
12883efb
SRRH
4172 ret = ring_buffer_resize(trace_buf->buffer,
4173 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4174 if (ret == 0)
12883efb
SRRH
4175 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4176 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4177 }
4178
4179 return ret;
4180}
12883efb 4181#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4182
2b6080f2
SR
4183static int __tracing_resize_ring_buffer(struct trace_array *tr,
4184 unsigned long size, int cpu)
73c5162a
SR
4185{
4186 int ret;
4187
4188 /*
4189 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4190 * we use the size that was given, and we can forget about
4191 * expanding it later.
73c5162a 4192 */
55034cd6 4193 ring_buffer_expanded = true;
73c5162a 4194
b382ede6 4195 /* May be called before buffers are initialized */
12883efb 4196 if (!tr->trace_buffer.buffer)
b382ede6
SR
4197 return 0;
4198
12883efb 4199 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4200 if (ret < 0)
4201 return ret;
4202
12883efb 4203#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4204 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4205 !tr->current_trace->use_max_tr)
ef710e10
KM
4206 goto out;
4207
12883efb 4208 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4209 if (ret < 0) {
12883efb
SRRH
4210 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4211 &tr->trace_buffer, cpu);
73c5162a 4212 if (r < 0) {
a123c52b
SR
4213 /*
4214 * AARGH! We are left with different
4215 * size max buffer!!!!
4216 * The max buffer is our "snapshot" buffer.
4217 * When a tracer needs a snapshot (one of the
4218 * latency tracers), it swaps the max buffer
4219 * with the saved snap shot. We succeeded to
4220 * update the size of the main buffer, but failed to
4221 * update the size of the max buffer. But when we tried
4222 * to reset the main buffer to the original size, we
4223 * failed there too. This is very unlikely to
4224 * happen, but if it does, warn and kill all
4225 * tracing.
4226 */
73c5162a
SR
4227 WARN_ON(1);
4228 tracing_disabled = 1;
4229 }
4230 return ret;
4231 }
4232
438ced17 4233 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4234 set_buffer_entries(&tr->max_buffer, size);
438ced17 4235 else
12883efb 4236 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4237
ef710e10 4238 out:
12883efb
SRRH
4239#endif /* CONFIG_TRACER_MAX_TRACE */
4240
438ced17 4241 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4242 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4243 else
12883efb 4244 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4245
4246 return ret;
4247}
4248
2b6080f2
SR
4249static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4250 unsigned long size, int cpu_id)
4f271a2a 4251{
83f40318 4252 int ret = size;
4f271a2a
VN
4253
4254 mutex_lock(&trace_types_lock);
4255
438ced17
VN
4256 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4257 /* make sure, this cpu is enabled in the mask */
4258 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4259 ret = -EINVAL;
4260 goto out;
4261 }
4262 }
4f271a2a 4263
2b6080f2 4264 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4265 if (ret < 0)
4266 ret = -ENOMEM;
4267
438ced17 4268out:
4f271a2a
VN
4269 mutex_unlock(&trace_types_lock);
4270
4271 return ret;
4272}
4273
ef710e10 4274
1852fcce
SR
4275/**
4276 * tracing_update_buffers - used by tracing facility to expand ring buffers
4277 *
4278 * To save on memory when the tracing is never used on a system with it
4279 * configured in. The ring buffers are set to a minimum size. But once
4280 * a user starts to use the tracing facility, then they need to grow
4281 * to their default size.
4282 *
4283 * This function is to be called when a tracer is about to be used.
4284 */
4285int tracing_update_buffers(void)
4286{
4287 int ret = 0;
4288
1027fcb2 4289 mutex_lock(&trace_types_lock);
1852fcce 4290 if (!ring_buffer_expanded)
2b6080f2 4291 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4292 RING_BUFFER_ALL_CPUS);
1027fcb2 4293 mutex_unlock(&trace_types_lock);
1852fcce
SR
4294
4295 return ret;
4296}
4297
577b785f
SR
4298struct trace_option_dentry;
4299
4300static struct trace_option_dentry *
2b6080f2 4301create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
4302
4303static void
4304destroy_trace_option_files(struct trace_option_dentry *topts);
4305
6b450d25
SRRH
4306/*
4307 * Used to clear out the tracer before deletion of an instance.
4308 * Must have trace_types_lock held.
4309 */
4310static void tracing_set_nop(struct trace_array *tr)
4311{
4312 if (tr->current_trace == &nop_trace)
4313 return;
4314
50512ab5 4315 tr->current_trace->enabled--;
6b450d25
SRRH
4316
4317 if (tr->current_trace->reset)
4318 tr->current_trace->reset(tr);
4319
4320 tr->current_trace = &nop_trace;
4321}
4322
09d23a1d 4323static void update_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4324{
577b785f 4325 static struct trace_option_dentry *topts;
09d23a1d
SRRH
4326
4327 /* Only enable if the directory has been created already. */
4328 if (!tr->dir)
4329 return;
4330
4331 /* Currently, only the top instance has options */
4332 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4333 return;
4334
4335 destroy_trace_option_files(topts);
4336 topts = create_trace_option_files(tr, t);
4337}
4338
4339static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4340{
bc0c38d1 4341 struct tracer *t;
12883efb 4342#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4343 bool had_max_tr;
12883efb 4344#endif
d9e54076 4345 int ret = 0;
bc0c38d1 4346
1027fcb2
SR
4347 mutex_lock(&trace_types_lock);
4348
73c5162a 4349 if (!ring_buffer_expanded) {
2b6080f2 4350 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4351 RING_BUFFER_ALL_CPUS);
73c5162a 4352 if (ret < 0)
59f586db 4353 goto out;
73c5162a
SR
4354 ret = 0;
4355 }
4356
bc0c38d1
SR
4357 for (t = trace_types; t; t = t->next) {
4358 if (strcmp(t->name, buf) == 0)
4359 break;
4360 }
c2931e05
FW
4361 if (!t) {
4362 ret = -EINVAL;
4363 goto out;
4364 }
2b6080f2 4365 if (t == tr->current_trace)
bc0c38d1
SR
4366 goto out;
4367
607e2ea1
SRRH
4368 /* Some tracers are only allowed for the top level buffer */
4369 if (!trace_ok_for_array(t, tr)) {
4370 ret = -EINVAL;
4371 goto out;
4372 }
4373
cf6ab6d9
SRRH
4374 /* If trace pipe files are being read, we can't change the tracer */
4375 if (tr->current_trace->ref) {
4376 ret = -EBUSY;
4377 goto out;
4378 }
4379
9f029e83 4380 trace_branch_disable();
613f04a0 4381
50512ab5 4382 tr->current_trace->enabled--;
613f04a0 4383
2b6080f2
SR
4384 if (tr->current_trace->reset)
4385 tr->current_trace->reset(tr);
34600f0e 4386
12883efb 4387 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4388 tr->current_trace = &nop_trace;
34600f0e 4389
45ad21ca
SRRH
4390#ifdef CONFIG_TRACER_MAX_TRACE
4391 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4392
4393 if (had_max_tr && !t->use_max_tr) {
4394 /*
4395 * We need to make sure that the update_max_tr sees that
4396 * current_trace changed to nop_trace to keep it from
4397 * swapping the buffers after we resize it.
4398 * The update_max_tr is called from interrupts disabled
4399 * so a synchronized_sched() is sufficient.
4400 */
4401 synchronize_sched();
3209cff4 4402 free_snapshot(tr);
ef710e10 4403 }
12883efb 4404#endif
09d23a1d 4405 update_tracer_options(tr, t);
12883efb
SRRH
4406
4407#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4408 if (t->use_max_tr && !had_max_tr) {
3209cff4 4409 ret = alloc_snapshot(tr);
d60da506
HT
4410 if (ret < 0)
4411 goto out;
ef710e10 4412 }
12883efb 4413#endif
577b785f 4414
1c80025a 4415 if (t->init) {
b6f11df2 4416 ret = tracer_init(t, tr);
1c80025a
FW
4417 if (ret)
4418 goto out;
4419 }
bc0c38d1 4420
2b6080f2 4421 tr->current_trace = t;
50512ab5 4422 tr->current_trace->enabled++;
9f029e83 4423 trace_branch_enable(tr);
bc0c38d1
SR
4424 out:
4425 mutex_unlock(&trace_types_lock);
4426
d9e54076
PZ
4427 return ret;
4428}
4429
4430static ssize_t
4431tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4432 size_t cnt, loff_t *ppos)
4433{
607e2ea1 4434 struct trace_array *tr = filp->private_data;
ee6c2c1b 4435 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4436 int i;
4437 size_t ret;
e6e7a65a
FW
4438 int err;
4439
4440 ret = cnt;
d9e54076 4441
ee6c2c1b
LZ
4442 if (cnt > MAX_TRACER_SIZE)
4443 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4444
4445 if (copy_from_user(&buf, ubuf, cnt))
4446 return -EFAULT;
4447
4448 buf[cnt] = 0;
4449
4450 /* strip ending whitespace. */
4451 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4452 buf[i] = 0;
4453
607e2ea1 4454 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4455 if (err)
4456 return err;
d9e54076 4457
cf8517cf 4458 *ppos += ret;
bc0c38d1 4459
c2931e05 4460 return ret;
bc0c38d1
SR
4461}
4462
4463static ssize_t
6508fa76
SF
4464tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
bc0c38d1 4466{
bc0c38d1
SR
4467 char buf[64];
4468 int r;
4469
cffae437 4470 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4471 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4472 if (r > sizeof(buf))
4473 r = sizeof(buf);
4bf39a94 4474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4475}
4476
4477static ssize_t
6508fa76
SF
4478tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4479 size_t cnt, loff_t *ppos)
bc0c38d1 4480{
5e39841c 4481 unsigned long val;
c6caeeb1 4482 int ret;
bc0c38d1 4483
22fe9b54
PH
4484 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4485 if (ret)
c6caeeb1 4486 return ret;
bc0c38d1
SR
4487
4488 *ptr = val * 1000;
4489
4490 return cnt;
4491}
4492
6508fa76
SF
4493static ssize_t
4494tracing_thresh_read(struct file *filp, char __user *ubuf,
4495 size_t cnt, loff_t *ppos)
4496{
4497 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4498}
4499
4500static ssize_t
4501tracing_thresh_write(struct file *filp, const char __user *ubuf,
4502 size_t cnt, loff_t *ppos)
4503{
4504 struct trace_array *tr = filp->private_data;
4505 int ret;
4506
4507 mutex_lock(&trace_types_lock);
4508 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4509 if (ret < 0)
4510 goto out;
4511
4512 if (tr->current_trace->update_thresh) {
4513 ret = tr->current_trace->update_thresh(tr);
4514 if (ret < 0)
4515 goto out;
4516 }
4517
4518 ret = cnt;
4519out:
4520 mutex_unlock(&trace_types_lock);
4521
4522 return ret;
4523}
4524
4525static ssize_t
4526tracing_max_lat_read(struct file *filp, char __user *ubuf,
4527 size_t cnt, loff_t *ppos)
4528{
4529 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4530}
4531
4532static ssize_t
4533tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4534 size_t cnt, loff_t *ppos)
4535{
4536 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4537}
4538
b3806b43
SR
4539static int tracing_open_pipe(struct inode *inode, struct file *filp)
4540{
15544209 4541 struct trace_array *tr = inode->i_private;
b3806b43 4542 struct trace_iterator *iter;
b04cc6b1 4543 int ret = 0;
b3806b43
SR
4544
4545 if (tracing_disabled)
4546 return -ENODEV;
4547
7b85af63
SRRH
4548 if (trace_array_get(tr) < 0)
4549 return -ENODEV;
4550
b04cc6b1
FW
4551 mutex_lock(&trace_types_lock);
4552
b3806b43
SR
4553 /* create a buffer to store the information to pass to userspace */
4554 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4555 if (!iter) {
4556 ret = -ENOMEM;
f77d09a3 4557 __trace_array_put(tr);
b04cc6b1
FW
4558 goto out;
4559 }
b3806b43 4560
3a161d99 4561 trace_seq_init(&iter->seq);
d716ff71 4562 iter->trace = tr->current_trace;
d7350c3f 4563
4462344e 4564 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4565 ret = -ENOMEM;
d7350c3f 4566 goto fail;
4462344e
RR
4567 }
4568
a309720c 4569 /* trace pipe does not show start of buffer */
4462344e 4570 cpumask_setall(iter->started);
a309720c 4571
112f38a7
SR
4572 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4573 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4574
8be0709f 4575 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4576 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4577 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4578
15544209
ON
4579 iter->tr = tr;
4580 iter->trace_buffer = &tr->trace_buffer;
4581 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4582 mutex_init(&iter->mutex);
b3806b43
SR
4583 filp->private_data = iter;
4584
107bad8b
SR
4585 if (iter->trace->pipe_open)
4586 iter->trace->pipe_open(iter);
107bad8b 4587
b444786f 4588 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4589
4590 tr->current_trace->ref++;
b04cc6b1
FW
4591out:
4592 mutex_unlock(&trace_types_lock);
4593 return ret;
d7350c3f
FW
4594
4595fail:
4596 kfree(iter->trace);
4597 kfree(iter);
7b85af63 4598 __trace_array_put(tr);
d7350c3f
FW
4599 mutex_unlock(&trace_types_lock);
4600 return ret;
b3806b43
SR
4601}
4602
4603static int tracing_release_pipe(struct inode *inode, struct file *file)
4604{
4605 struct trace_iterator *iter = file->private_data;
15544209 4606 struct trace_array *tr = inode->i_private;
b3806b43 4607
b04cc6b1
FW
4608 mutex_lock(&trace_types_lock);
4609
cf6ab6d9
SRRH
4610 tr->current_trace->ref--;
4611
29bf4a5e 4612 if (iter->trace->pipe_close)
c521efd1
SR
4613 iter->trace->pipe_close(iter);
4614
b04cc6b1
FW
4615 mutex_unlock(&trace_types_lock);
4616
4462344e 4617 free_cpumask_var(iter->started);
d7350c3f 4618 mutex_destroy(&iter->mutex);
b3806b43 4619 kfree(iter);
b3806b43 4620
7b85af63
SRRH
4621 trace_array_put(tr);
4622
b3806b43
SR
4623 return 0;
4624}
4625
2a2cc8f7 4626static unsigned int
cc60cdc9 4627trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4628{
15693458
SRRH
4629 /* Iterators are static, they should be filled or empty */
4630 if (trace_buffer_iter(iter, iter->cpu_file))
4631 return POLLIN | POLLRDNORM;
2a2cc8f7 4632
15693458 4633 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4634 /*
4635 * Always select as readable when in blocking mode
4636 */
4637 return POLLIN | POLLRDNORM;
15693458 4638 else
12883efb 4639 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4640 filp, poll_table);
2a2cc8f7 4641}
2a2cc8f7 4642
cc60cdc9
SR
4643static unsigned int
4644tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4645{
4646 struct trace_iterator *iter = filp->private_data;
4647
4648 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4649}
4650
d716ff71 4651/* Must be called with iter->mutex held. */
ff98781b 4652static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4653{
4654 struct trace_iterator *iter = filp->private_data;
8b8b3683 4655 int ret;
b3806b43 4656
b3806b43 4657 while (trace_empty(iter)) {
2dc8f095 4658
107bad8b 4659 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4660 return -EAGAIN;
107bad8b 4661 }
2dc8f095 4662
b3806b43 4663 /*
250bfd3d 4664 * We block until we read something and tracing is disabled.
b3806b43
SR
4665 * We still block if tracing is disabled, but we have never
4666 * read anything. This allows a user to cat this file, and
4667 * then enable tracing. But after we have read something,
4668 * we give an EOF when tracing is again disabled.
4669 *
4670 * iter->pos will be 0 if we haven't read anything.
4671 */
10246fa3 4672 if (!tracing_is_on() && iter->pos)
b3806b43 4673 break;
f4874261
SRRH
4674
4675 mutex_unlock(&iter->mutex);
4676
e30f53aa 4677 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4678
4679 mutex_lock(&iter->mutex);
4680
8b8b3683
SRRH
4681 if (ret)
4682 return ret;
b3806b43
SR
4683 }
4684
ff98781b
EGM
4685 return 1;
4686}
4687
4688/*
4689 * Consumer reader.
4690 */
4691static ssize_t
4692tracing_read_pipe(struct file *filp, char __user *ubuf,
4693 size_t cnt, loff_t *ppos)
4694{
4695 struct trace_iterator *iter = filp->private_data;
4696 ssize_t sret;
4697
4698 /* return any leftover data */
4699 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4700 if (sret != -EBUSY)
4701 return sret;
4702
f9520750 4703 trace_seq_init(&iter->seq);
ff98781b 4704
d7350c3f
FW
4705 /*
4706 * Avoid more than one consumer on a single file descriptor
4707 * This is just a matter of traces coherency, the ring buffer itself
4708 * is protected.
4709 */
4710 mutex_lock(&iter->mutex);
ff98781b
EGM
4711 if (iter->trace->read) {
4712 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4713 if (sret)
4714 goto out;
4715 }
4716
4717waitagain:
4718 sret = tracing_wait_pipe(filp);
4719 if (sret <= 0)
4720 goto out;
4721
b3806b43 4722 /* stop when tracing is finished */
ff98781b
EGM
4723 if (trace_empty(iter)) {
4724 sret = 0;
107bad8b 4725 goto out;
ff98781b 4726 }
b3806b43
SR
4727
4728 if (cnt >= PAGE_SIZE)
4729 cnt = PAGE_SIZE - 1;
4730
53d0aa77 4731 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4732 memset(&iter->seq, 0,
4733 sizeof(struct trace_iterator) -
4734 offsetof(struct trace_iterator, seq));
ed5467da 4735 cpumask_clear(iter->started);
4823ed7e 4736 iter->pos = -1;
b3806b43 4737
4f535968 4738 trace_event_read_lock();
7e53bd42 4739 trace_access_lock(iter->cpu_file);
955b61e5 4740 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4741 enum print_line_t ret;
5ac48378 4742 int save_len = iter->seq.seq.len;
088b1e42 4743
f9896bf3 4744 ret = print_trace_line(iter);
2c4f035f 4745 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4746 /* don't print partial lines */
5ac48378 4747 iter->seq.seq.len = save_len;
b3806b43 4748 break;
088b1e42 4749 }
b91facc3
FW
4750 if (ret != TRACE_TYPE_NO_CONSUME)
4751 trace_consume(iter);
b3806b43 4752
5ac48378 4753 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4754 break;
ee5e51f5
JO
4755
4756 /*
4757 * Setting the full flag means we reached the trace_seq buffer
4758 * size and we should leave by partial output condition above.
4759 * One of the trace_seq_* functions is not used properly.
4760 */
4761 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4762 iter->ent->type);
b3806b43 4763 }
7e53bd42 4764 trace_access_unlock(iter->cpu_file);
4f535968 4765 trace_event_read_unlock();
b3806b43 4766
b3806b43 4767 /* Now copy what we have to the user */
6c6c2796 4768 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4769 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4770 trace_seq_init(&iter->seq);
9ff4b974
PP
4771
4772 /*
25985edc 4773 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4774 * entries, go back to wait for more entries.
4775 */
6c6c2796 4776 if (sret == -EBUSY)
9ff4b974 4777 goto waitagain;
b3806b43 4778
107bad8b 4779out:
d7350c3f 4780 mutex_unlock(&iter->mutex);
107bad8b 4781
6c6c2796 4782 return sret;
b3806b43
SR
4783}
4784
3c56819b
EGM
4785static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4786 unsigned int idx)
4787{
4788 __free_page(spd->pages[idx]);
4789}
4790
28dfef8f 4791static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4792 .can_merge = 0,
34cd4998 4793 .confirm = generic_pipe_buf_confirm,
92fdd98c 4794 .release = generic_pipe_buf_release,
34cd4998
SR
4795 .steal = generic_pipe_buf_steal,
4796 .get = generic_pipe_buf_get,
3c56819b
EGM
4797};
4798
34cd4998 4799static size_t
fa7c7f6e 4800tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4801{
4802 size_t count;
74f06bb7 4803 int save_len;
34cd4998
SR
4804 int ret;
4805
4806 /* Seq buffer is page-sized, exactly what we need. */
4807 for (;;) {
74f06bb7 4808 save_len = iter->seq.seq.len;
34cd4998 4809 ret = print_trace_line(iter);
74f06bb7
SRRH
4810
4811 if (trace_seq_has_overflowed(&iter->seq)) {
4812 iter->seq.seq.len = save_len;
34cd4998
SR
4813 break;
4814 }
74f06bb7
SRRH
4815
4816 /*
4817 * This should not be hit, because it should only
4818 * be set if the iter->seq overflowed. But check it
4819 * anyway to be safe.
4820 */
34cd4998 4821 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4822 iter->seq.seq.len = save_len;
4823 break;
4824 }
4825
5ac48378 4826 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4827 if (rem < count) {
4828 rem = 0;
4829 iter->seq.seq.len = save_len;
34cd4998
SR
4830 break;
4831 }
4832
74e7ff8c
LJ
4833 if (ret != TRACE_TYPE_NO_CONSUME)
4834 trace_consume(iter);
34cd4998 4835 rem -= count;
955b61e5 4836 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4837 rem = 0;
4838 iter->ent = NULL;
4839 break;
4840 }
4841 }
4842
4843 return rem;
4844}
4845
3c56819b
EGM
4846static ssize_t tracing_splice_read_pipe(struct file *filp,
4847 loff_t *ppos,
4848 struct pipe_inode_info *pipe,
4849 size_t len,
4850 unsigned int flags)
4851{
35f3d14d
JA
4852 struct page *pages_def[PIPE_DEF_BUFFERS];
4853 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4854 struct trace_iterator *iter = filp->private_data;
4855 struct splice_pipe_desc spd = {
35f3d14d
JA
4856 .pages = pages_def,
4857 .partial = partial_def,
34cd4998 4858 .nr_pages = 0, /* This gets updated below. */
047fe360 4859 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4860 .flags = flags,
4861 .ops = &tracing_pipe_buf_ops,
4862 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4863 };
4864 ssize_t ret;
34cd4998 4865 size_t rem;
3c56819b
EGM
4866 unsigned int i;
4867
35f3d14d
JA
4868 if (splice_grow_spd(pipe, &spd))
4869 return -ENOMEM;
4870
d7350c3f 4871 mutex_lock(&iter->mutex);
3c56819b
EGM
4872
4873 if (iter->trace->splice_read) {
4874 ret = iter->trace->splice_read(iter, filp,
4875 ppos, pipe, len, flags);
4876 if (ret)
34cd4998 4877 goto out_err;
3c56819b
EGM
4878 }
4879
4880 ret = tracing_wait_pipe(filp);
4881 if (ret <= 0)
34cd4998 4882 goto out_err;
3c56819b 4883
955b61e5 4884 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4885 ret = -EFAULT;
34cd4998 4886 goto out_err;
3c56819b
EGM
4887 }
4888
4f535968 4889 trace_event_read_lock();
7e53bd42 4890 trace_access_lock(iter->cpu_file);
4f535968 4891
3c56819b 4892 /* Fill as many pages as possible. */
a786c06d 4893 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4894 spd.pages[i] = alloc_page(GFP_KERNEL);
4895 if (!spd.pages[i])
34cd4998 4896 break;
3c56819b 4897
fa7c7f6e 4898 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4899
4900 /* Copy the data into the page, so we can start over. */
4901 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4902 page_address(spd.pages[i]),
5ac48378 4903 trace_seq_used(&iter->seq));
3c56819b 4904 if (ret < 0) {
35f3d14d 4905 __free_page(spd.pages[i]);
3c56819b
EGM
4906 break;
4907 }
35f3d14d 4908 spd.partial[i].offset = 0;
5ac48378 4909 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4910
f9520750 4911 trace_seq_init(&iter->seq);
3c56819b
EGM
4912 }
4913
7e53bd42 4914 trace_access_unlock(iter->cpu_file);
4f535968 4915 trace_event_read_unlock();
d7350c3f 4916 mutex_unlock(&iter->mutex);
3c56819b
EGM
4917
4918 spd.nr_pages = i;
4919
35f3d14d
JA
4920 ret = splice_to_pipe(pipe, &spd);
4921out:
047fe360 4922 splice_shrink_spd(&spd);
35f3d14d 4923 return ret;
3c56819b 4924
34cd4998 4925out_err:
d7350c3f 4926 mutex_unlock(&iter->mutex);
35f3d14d 4927 goto out;
3c56819b
EGM
4928}
4929
a98a3c3f
SR
4930static ssize_t
4931tracing_entries_read(struct file *filp, char __user *ubuf,
4932 size_t cnt, loff_t *ppos)
4933{
0bc392ee
ON
4934 struct inode *inode = file_inode(filp);
4935 struct trace_array *tr = inode->i_private;
4936 int cpu = tracing_get_cpu(inode);
438ced17
VN
4937 char buf[64];
4938 int r = 0;
4939 ssize_t ret;
a98a3c3f 4940
db526ca3 4941 mutex_lock(&trace_types_lock);
438ced17 4942
0bc392ee 4943 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4944 int cpu, buf_size_same;
4945 unsigned long size;
4946
4947 size = 0;
4948 buf_size_same = 1;
4949 /* check if all cpu sizes are same */
4950 for_each_tracing_cpu(cpu) {
4951 /* fill in the size from first enabled cpu */
4952 if (size == 0)
12883efb
SRRH
4953 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4954 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4955 buf_size_same = 0;
4956 break;
4957 }
4958 }
4959
4960 if (buf_size_same) {
4961 if (!ring_buffer_expanded)
4962 r = sprintf(buf, "%lu (expanded: %lu)\n",
4963 size >> 10,
4964 trace_buf_size >> 10);
4965 else
4966 r = sprintf(buf, "%lu\n", size >> 10);
4967 } else
4968 r = sprintf(buf, "X\n");
4969 } else
0bc392ee 4970 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4971
db526ca3
SR
4972 mutex_unlock(&trace_types_lock);
4973
438ced17
VN
4974 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4975 return ret;
a98a3c3f
SR
4976}
4977
4978static ssize_t
4979tracing_entries_write(struct file *filp, const char __user *ubuf,
4980 size_t cnt, loff_t *ppos)
4981{
0bc392ee
ON
4982 struct inode *inode = file_inode(filp);
4983 struct trace_array *tr = inode->i_private;
a98a3c3f 4984 unsigned long val;
4f271a2a 4985 int ret;
a98a3c3f 4986
22fe9b54
PH
4987 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4988 if (ret)
c6caeeb1 4989 return ret;
a98a3c3f
SR
4990
4991 /* must have at least 1 entry */
4992 if (!val)
4993 return -EINVAL;
4994
1696b2b0
SR
4995 /* value is in KB */
4996 val <<= 10;
0bc392ee 4997 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4998 if (ret < 0)
4999 return ret;
a98a3c3f 5000
cf8517cf 5001 *ppos += cnt;
a98a3c3f 5002
4f271a2a
VN
5003 return cnt;
5004}
bf5e6519 5005
f81ab074
VN
5006static ssize_t
5007tracing_total_entries_read(struct file *filp, char __user *ubuf,
5008 size_t cnt, loff_t *ppos)
5009{
5010 struct trace_array *tr = filp->private_data;
5011 char buf[64];
5012 int r, cpu;
5013 unsigned long size = 0, expanded_size = 0;
5014
5015 mutex_lock(&trace_types_lock);
5016 for_each_tracing_cpu(cpu) {
12883efb 5017 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5018 if (!ring_buffer_expanded)
5019 expanded_size += trace_buf_size >> 10;
5020 }
5021 if (ring_buffer_expanded)
5022 r = sprintf(buf, "%lu\n", size);
5023 else
5024 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5025 mutex_unlock(&trace_types_lock);
5026
5027 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5028}
5029
4f271a2a
VN
5030static ssize_t
5031tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5032 size_t cnt, loff_t *ppos)
5033{
5034 /*
5035 * There is no need to read what the user has written, this function
5036 * is just to make sure that there is no error when "echo" is used
5037 */
5038
5039 *ppos += cnt;
a98a3c3f
SR
5040
5041 return cnt;
5042}
5043
4f271a2a
VN
5044static int
5045tracing_free_buffer_release(struct inode *inode, struct file *filp)
5046{
2b6080f2
SR
5047 struct trace_array *tr = inode->i_private;
5048
cf30cf67
SR
5049 /* disable tracing ? */
5050 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5051 tracer_tracing_off(tr);
4f271a2a 5052 /* resize the ring buffer to 0 */
2b6080f2 5053 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5054
7b85af63
SRRH
5055 trace_array_put(tr);
5056
4f271a2a
VN
5057 return 0;
5058}
5059
5bf9a1ee
PP
5060static ssize_t
5061tracing_mark_write(struct file *filp, const char __user *ubuf,
5062 size_t cnt, loff_t *fpos)
5063{
d696b58c 5064 unsigned long addr = (unsigned long)ubuf;
2d71619c 5065 struct trace_array *tr = filp->private_data;
d696b58c
SR
5066 struct ring_buffer_event *event;
5067 struct ring_buffer *buffer;
5068 struct print_entry *entry;
5069 unsigned long irq_flags;
5070 struct page *pages[2];
6edb2a8a 5071 void *map_page[2];
d696b58c
SR
5072 int nr_pages = 1;
5073 ssize_t written;
d696b58c
SR
5074 int offset;
5075 int size;
5076 int len;
5077 int ret;
6edb2a8a 5078 int i;
5bf9a1ee 5079
c76f0694 5080 if (tracing_disabled)
5bf9a1ee
PP
5081 return -EINVAL;
5082
5224c3a3
MSB
5083 if (!(trace_flags & TRACE_ITER_MARKERS))
5084 return -EINVAL;
5085
5bf9a1ee
PP
5086 if (cnt > TRACE_BUF_SIZE)
5087 cnt = TRACE_BUF_SIZE;
5088
d696b58c
SR
5089 /*
5090 * Userspace is injecting traces into the kernel trace buffer.
5091 * We want to be as non intrusive as possible.
5092 * To do so, we do not want to allocate any special buffers
5093 * or take any locks, but instead write the userspace data
5094 * straight into the ring buffer.
5095 *
5096 * First we need to pin the userspace buffer into memory,
5097 * which, most likely it is, because it just referenced it.
5098 * But there's no guarantee that it is. By using get_user_pages_fast()
5099 * and kmap_atomic/kunmap_atomic() we can get access to the
5100 * pages directly. We then write the data directly into the
5101 * ring buffer.
5102 */
5103 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5104
d696b58c
SR
5105 /* check if we cross pages */
5106 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5107 nr_pages = 2;
5108
5109 offset = addr & (PAGE_SIZE - 1);
5110 addr &= PAGE_MASK;
5111
5112 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5113 if (ret < nr_pages) {
5114 while (--ret >= 0)
5115 put_page(pages[ret]);
5116 written = -EFAULT;
5117 goto out;
5bf9a1ee 5118 }
d696b58c 5119
6edb2a8a
SR
5120 for (i = 0; i < nr_pages; i++)
5121 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5122
5123 local_save_flags(irq_flags);
5124 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5125 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5126 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5127 irq_flags, preempt_count());
5128 if (!event) {
5129 /* Ring buffer disabled, return as if not open for write */
5130 written = -EBADF;
5131 goto out_unlock;
5bf9a1ee 5132 }
d696b58c
SR
5133
5134 entry = ring_buffer_event_data(event);
5135 entry->ip = _THIS_IP_;
5136
5137 if (nr_pages == 2) {
5138 len = PAGE_SIZE - offset;
6edb2a8a
SR
5139 memcpy(&entry->buf, map_page[0] + offset, len);
5140 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5141 } else
6edb2a8a 5142 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5143
d696b58c
SR
5144 if (entry->buf[cnt - 1] != '\n') {
5145 entry->buf[cnt] = '\n';
5146 entry->buf[cnt + 1] = '\0';
5147 } else
5148 entry->buf[cnt] = '\0';
5149
7ffbd48d 5150 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5151
d696b58c 5152 written = cnt;
5bf9a1ee 5153
d696b58c 5154 *fpos += written;
1aa54bca 5155
d696b58c 5156 out_unlock:
7215853e 5157 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5158 kunmap_atomic(map_page[i]);
5159 put_page(pages[i]);
5160 }
d696b58c 5161 out:
1aa54bca 5162 return written;
5bf9a1ee
PP
5163}
5164
13f16d20 5165static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5166{
2b6080f2 5167 struct trace_array *tr = m->private;
5079f326
Z
5168 int i;
5169
5170 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5171 seq_printf(m,
5079f326 5172 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5173 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5174 i == tr->clock_id ? "]" : "");
13f16d20 5175 seq_putc(m, '\n');
5079f326 5176
13f16d20 5177 return 0;
5079f326
Z
5178}
5179
e1e232ca 5180static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5181{
5079f326
Z
5182 int i;
5183
5079f326
Z
5184 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5185 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5186 break;
5187 }
5188 if (i == ARRAY_SIZE(trace_clocks))
5189 return -EINVAL;
5190
5079f326
Z
5191 mutex_lock(&trace_types_lock);
5192
2b6080f2
SR
5193 tr->clock_id = i;
5194
12883efb 5195 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5196
60303ed3
DS
5197 /*
5198 * New clock may not be consistent with the previous clock.
5199 * Reset the buffer so that it doesn't have incomparable timestamps.
5200 */
9457158b 5201 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5202
5203#ifdef CONFIG_TRACER_MAX_TRACE
5204 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5205 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5206 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5207#endif
60303ed3 5208
5079f326
Z
5209 mutex_unlock(&trace_types_lock);
5210
e1e232ca
SR
5211 return 0;
5212}
5213
5214static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5215 size_t cnt, loff_t *fpos)
5216{
5217 struct seq_file *m = filp->private_data;
5218 struct trace_array *tr = m->private;
5219 char buf[64];
5220 const char *clockstr;
5221 int ret;
5222
5223 if (cnt >= sizeof(buf))
5224 return -EINVAL;
5225
5226 if (copy_from_user(&buf, ubuf, cnt))
5227 return -EFAULT;
5228
5229 buf[cnt] = 0;
5230
5231 clockstr = strstrip(buf);
5232
5233 ret = tracing_set_clock(tr, clockstr);
5234 if (ret)
5235 return ret;
5236
5079f326
Z
5237 *fpos += cnt;
5238
5239 return cnt;
5240}
5241
13f16d20
LZ
5242static int tracing_clock_open(struct inode *inode, struct file *file)
5243{
7b85af63
SRRH
5244 struct trace_array *tr = inode->i_private;
5245 int ret;
5246
13f16d20
LZ
5247 if (tracing_disabled)
5248 return -ENODEV;
2b6080f2 5249
7b85af63
SRRH
5250 if (trace_array_get(tr))
5251 return -ENODEV;
5252
5253 ret = single_open(file, tracing_clock_show, inode->i_private);
5254 if (ret < 0)
5255 trace_array_put(tr);
5256
5257 return ret;
13f16d20
LZ
5258}
5259
6de58e62
SRRH
5260struct ftrace_buffer_info {
5261 struct trace_iterator iter;
5262 void *spare;
5263 unsigned int read;
5264};
5265
debdd57f
HT
5266#ifdef CONFIG_TRACER_SNAPSHOT
5267static int tracing_snapshot_open(struct inode *inode, struct file *file)
5268{
6484c71c 5269 struct trace_array *tr = inode->i_private;
debdd57f 5270 struct trace_iterator *iter;
2b6080f2 5271 struct seq_file *m;
debdd57f
HT
5272 int ret = 0;
5273
ff451961
SRRH
5274 if (trace_array_get(tr) < 0)
5275 return -ENODEV;
5276
debdd57f 5277 if (file->f_mode & FMODE_READ) {
6484c71c 5278 iter = __tracing_open(inode, file, true);
debdd57f
HT
5279 if (IS_ERR(iter))
5280 ret = PTR_ERR(iter);
2b6080f2
SR
5281 } else {
5282 /* Writes still need the seq_file to hold the private data */
f77d09a3 5283 ret = -ENOMEM;
2b6080f2
SR
5284 m = kzalloc(sizeof(*m), GFP_KERNEL);
5285 if (!m)
f77d09a3 5286 goto out;
2b6080f2
SR
5287 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5288 if (!iter) {
5289 kfree(m);
f77d09a3 5290 goto out;
2b6080f2 5291 }
f77d09a3
AL
5292 ret = 0;
5293
ff451961 5294 iter->tr = tr;
6484c71c
ON
5295 iter->trace_buffer = &tr->max_buffer;
5296 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5297 m->private = iter;
5298 file->private_data = m;
debdd57f 5299 }
f77d09a3 5300out:
ff451961
SRRH
5301 if (ret < 0)
5302 trace_array_put(tr);
5303
debdd57f
HT
5304 return ret;
5305}
5306
5307static ssize_t
5308tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5309 loff_t *ppos)
5310{
2b6080f2
SR
5311 struct seq_file *m = filp->private_data;
5312 struct trace_iterator *iter = m->private;
5313 struct trace_array *tr = iter->tr;
debdd57f
HT
5314 unsigned long val;
5315 int ret;
5316
5317 ret = tracing_update_buffers();
5318 if (ret < 0)
5319 return ret;
5320
5321 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5322 if (ret)
5323 return ret;
5324
5325 mutex_lock(&trace_types_lock);
5326
2b6080f2 5327 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5328 ret = -EBUSY;
5329 goto out;
5330 }
5331
5332 switch (val) {
5333 case 0:
f1affcaa
SRRH
5334 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5335 ret = -EINVAL;
5336 break;
debdd57f 5337 }
3209cff4
SRRH
5338 if (tr->allocated_snapshot)
5339 free_snapshot(tr);
debdd57f
HT
5340 break;
5341 case 1:
f1affcaa
SRRH
5342/* Only allow per-cpu swap if the ring buffer supports it */
5343#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5344 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5345 ret = -EINVAL;
5346 break;
5347 }
5348#endif
45ad21ca 5349 if (!tr->allocated_snapshot) {
3209cff4 5350 ret = alloc_snapshot(tr);
debdd57f
HT
5351 if (ret < 0)
5352 break;
debdd57f 5353 }
debdd57f
HT
5354 local_irq_disable();
5355 /* Now, we're going to swap */
f1affcaa 5356 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5357 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5358 else
ce9bae55 5359 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5360 local_irq_enable();
5361 break;
5362 default:
45ad21ca 5363 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5364 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5365 tracing_reset_online_cpus(&tr->max_buffer);
5366 else
5367 tracing_reset(&tr->max_buffer, iter->cpu_file);
5368 }
debdd57f
HT
5369 break;
5370 }
5371
5372 if (ret >= 0) {
5373 *ppos += cnt;
5374 ret = cnt;
5375 }
5376out:
5377 mutex_unlock(&trace_types_lock);
5378 return ret;
5379}
2b6080f2
SR
5380
5381static int tracing_snapshot_release(struct inode *inode, struct file *file)
5382{
5383 struct seq_file *m = file->private_data;
ff451961
SRRH
5384 int ret;
5385
5386 ret = tracing_release(inode, file);
2b6080f2
SR
5387
5388 if (file->f_mode & FMODE_READ)
ff451961 5389 return ret;
2b6080f2
SR
5390
5391 /* If write only, the seq_file is just a stub */
5392 if (m)
5393 kfree(m->private);
5394 kfree(m);
5395
5396 return 0;
5397}
5398
6de58e62
SRRH
5399static int tracing_buffers_open(struct inode *inode, struct file *filp);
5400static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5401 size_t count, loff_t *ppos);
5402static int tracing_buffers_release(struct inode *inode, struct file *file);
5403static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5404 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5405
5406static int snapshot_raw_open(struct inode *inode, struct file *filp)
5407{
5408 struct ftrace_buffer_info *info;
5409 int ret;
5410
5411 ret = tracing_buffers_open(inode, filp);
5412 if (ret < 0)
5413 return ret;
5414
5415 info = filp->private_data;
5416
5417 if (info->iter.trace->use_max_tr) {
5418 tracing_buffers_release(inode, filp);
5419 return -EBUSY;
5420 }
5421
5422 info->iter.snapshot = true;
5423 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5424
5425 return ret;
5426}
5427
debdd57f
HT
5428#endif /* CONFIG_TRACER_SNAPSHOT */
5429
5430
6508fa76
SF
5431static const struct file_operations tracing_thresh_fops = {
5432 .open = tracing_open_generic,
5433 .read = tracing_thresh_read,
5434 .write = tracing_thresh_write,
5435 .llseek = generic_file_llseek,
5436};
5437
5e2336a0 5438static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5439 .open = tracing_open_generic,
5440 .read = tracing_max_lat_read,
5441 .write = tracing_max_lat_write,
b444786f 5442 .llseek = generic_file_llseek,
bc0c38d1
SR
5443};
5444
5e2336a0 5445static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5446 .open = tracing_open_generic,
5447 .read = tracing_set_trace_read,
5448 .write = tracing_set_trace_write,
b444786f 5449 .llseek = generic_file_llseek,
bc0c38d1
SR
5450};
5451
5e2336a0 5452static const struct file_operations tracing_pipe_fops = {
4bf39a94 5453 .open = tracing_open_pipe,
2a2cc8f7 5454 .poll = tracing_poll_pipe,
4bf39a94 5455 .read = tracing_read_pipe,
3c56819b 5456 .splice_read = tracing_splice_read_pipe,
4bf39a94 5457 .release = tracing_release_pipe,
b444786f 5458 .llseek = no_llseek,
b3806b43
SR
5459};
5460
5e2336a0 5461static const struct file_operations tracing_entries_fops = {
0bc392ee 5462 .open = tracing_open_generic_tr,
a98a3c3f
SR
5463 .read = tracing_entries_read,
5464 .write = tracing_entries_write,
b444786f 5465 .llseek = generic_file_llseek,
0bc392ee 5466 .release = tracing_release_generic_tr,
a98a3c3f
SR
5467};
5468
f81ab074 5469static const struct file_operations tracing_total_entries_fops = {
7b85af63 5470 .open = tracing_open_generic_tr,
f81ab074
VN
5471 .read = tracing_total_entries_read,
5472 .llseek = generic_file_llseek,
7b85af63 5473 .release = tracing_release_generic_tr,
f81ab074
VN
5474};
5475
4f271a2a 5476static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5477 .open = tracing_open_generic_tr,
4f271a2a
VN
5478 .write = tracing_free_buffer_write,
5479 .release = tracing_free_buffer_release,
5480};
5481
5e2336a0 5482static const struct file_operations tracing_mark_fops = {
7b85af63 5483 .open = tracing_open_generic_tr,
5bf9a1ee 5484 .write = tracing_mark_write,
b444786f 5485 .llseek = generic_file_llseek,
7b85af63 5486 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5487};
5488
5079f326 5489static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5490 .open = tracing_clock_open,
5491 .read = seq_read,
5492 .llseek = seq_lseek,
7b85af63 5493 .release = tracing_single_release_tr,
5079f326
Z
5494 .write = tracing_clock_write,
5495};
5496
debdd57f
HT
5497#ifdef CONFIG_TRACER_SNAPSHOT
5498static const struct file_operations snapshot_fops = {
5499 .open = tracing_snapshot_open,
5500 .read = seq_read,
5501 .write = tracing_snapshot_write,
098c879e 5502 .llseek = tracing_lseek,
2b6080f2 5503 .release = tracing_snapshot_release,
debdd57f 5504};
debdd57f 5505
6de58e62
SRRH
5506static const struct file_operations snapshot_raw_fops = {
5507 .open = snapshot_raw_open,
5508 .read = tracing_buffers_read,
5509 .release = tracing_buffers_release,
5510 .splice_read = tracing_buffers_splice_read,
5511 .llseek = no_llseek,
2cadf913
SR
5512};
5513
6de58e62
SRRH
5514#endif /* CONFIG_TRACER_SNAPSHOT */
5515
2cadf913
SR
5516static int tracing_buffers_open(struct inode *inode, struct file *filp)
5517{
46ef2be0 5518 struct trace_array *tr = inode->i_private;
2cadf913 5519 struct ftrace_buffer_info *info;
7b85af63 5520 int ret;
2cadf913
SR
5521
5522 if (tracing_disabled)
5523 return -ENODEV;
5524
7b85af63
SRRH
5525 if (trace_array_get(tr) < 0)
5526 return -ENODEV;
5527
2cadf913 5528 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5529 if (!info) {
5530 trace_array_put(tr);
2cadf913 5531 return -ENOMEM;
7b85af63 5532 }
2cadf913 5533
a695cb58
SRRH
5534 mutex_lock(&trace_types_lock);
5535
cc60cdc9 5536 info->iter.tr = tr;
46ef2be0 5537 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5538 info->iter.trace = tr->current_trace;
12883efb 5539 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5540 info->spare = NULL;
2cadf913 5541 /* Force reading ring buffer for first read */
cc60cdc9 5542 info->read = (unsigned int)-1;
2cadf913
SR
5543
5544 filp->private_data = info;
5545
cf6ab6d9
SRRH
5546 tr->current_trace->ref++;
5547
a695cb58
SRRH
5548 mutex_unlock(&trace_types_lock);
5549
7b85af63
SRRH
5550 ret = nonseekable_open(inode, filp);
5551 if (ret < 0)
5552 trace_array_put(tr);
5553
5554 return ret;
2cadf913
SR
5555}
5556
cc60cdc9
SR
5557static unsigned int
5558tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5559{
5560 struct ftrace_buffer_info *info = filp->private_data;
5561 struct trace_iterator *iter = &info->iter;
5562
5563 return trace_poll(iter, filp, poll_table);
5564}
5565
2cadf913
SR
5566static ssize_t
5567tracing_buffers_read(struct file *filp, char __user *ubuf,
5568 size_t count, loff_t *ppos)
5569{
5570 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5571 struct trace_iterator *iter = &info->iter;
2cadf913 5572 ssize_t ret;
6de58e62 5573 ssize_t size;
2cadf913 5574
2dc5d12b
SR
5575 if (!count)
5576 return 0;
5577
6de58e62 5578#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5579 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5580 return -EBUSY;
6de58e62
SRRH
5581#endif
5582
ddd538f3 5583 if (!info->spare)
12883efb
SRRH
5584 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5585 iter->cpu_file);
ddd538f3 5586 if (!info->spare)
d716ff71 5587 return -ENOMEM;
ddd538f3 5588
2cadf913
SR
5589 /* Do we have previous read data to read? */
5590 if (info->read < PAGE_SIZE)
5591 goto read;
5592
b627344f 5593 again:
cc60cdc9 5594 trace_access_lock(iter->cpu_file);
12883efb 5595 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5596 &info->spare,
5597 count,
cc60cdc9
SR
5598 iter->cpu_file, 0);
5599 trace_access_unlock(iter->cpu_file);
2cadf913 5600
b627344f
SR
5601 if (ret < 0) {
5602 if (trace_empty(iter)) {
d716ff71
SRRH
5603 if ((filp->f_flags & O_NONBLOCK))
5604 return -EAGAIN;
5605
e30f53aa 5606 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5607 if (ret)
5608 return ret;
5609
b627344f
SR
5610 goto again;
5611 }
d716ff71 5612 return 0;
b627344f 5613 }
436fc280 5614
436fc280 5615 info->read = 0;
b627344f 5616 read:
2cadf913
SR
5617 size = PAGE_SIZE - info->read;
5618 if (size > count)
5619 size = count;
5620
5621 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5622 if (ret == size)
5623 return -EFAULT;
5624
2dc5d12b
SR
5625 size -= ret;
5626
2cadf913
SR
5627 *ppos += size;
5628 info->read += size;
5629
5630 return size;
5631}
5632
5633static int tracing_buffers_release(struct inode *inode, struct file *file)
5634{
5635 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5636 struct trace_iterator *iter = &info->iter;
2cadf913 5637
a695cb58
SRRH
5638 mutex_lock(&trace_types_lock);
5639
cf6ab6d9
SRRH
5640 iter->tr->current_trace->ref--;
5641
ff451961 5642 __trace_array_put(iter->tr);
2cadf913 5643
ddd538f3 5644 if (info->spare)
12883efb 5645 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5646 kfree(info);
5647
a695cb58
SRRH
5648 mutex_unlock(&trace_types_lock);
5649
2cadf913
SR
5650 return 0;
5651}
5652
5653struct buffer_ref {
5654 struct ring_buffer *buffer;
5655 void *page;
5656 int ref;
5657};
5658
5659static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5660 struct pipe_buffer *buf)
5661{
5662 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5663
5664 if (--ref->ref)
5665 return;
5666
5667 ring_buffer_free_read_page(ref->buffer, ref->page);
5668 kfree(ref);
5669 buf->private = 0;
5670}
5671
2cadf913
SR
5672static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5673 struct pipe_buffer *buf)
5674{
5675 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5676
5677 ref->ref++;
5678}
5679
5680/* Pipe buffer operations for a buffer. */
28dfef8f 5681static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5682 .can_merge = 0,
2cadf913
SR
5683 .confirm = generic_pipe_buf_confirm,
5684 .release = buffer_pipe_buf_release,
d55cb6cf 5685 .steal = generic_pipe_buf_steal,
2cadf913
SR
5686 .get = buffer_pipe_buf_get,
5687};
5688
5689/*
5690 * Callback from splice_to_pipe(), if we need to release some pages
5691 * at the end of the spd in case we error'ed out in filling the pipe.
5692 */
5693static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5694{
5695 struct buffer_ref *ref =
5696 (struct buffer_ref *)spd->partial[i].private;
5697
5698 if (--ref->ref)
5699 return;
5700
5701 ring_buffer_free_read_page(ref->buffer, ref->page);
5702 kfree(ref);
5703 spd->partial[i].private = 0;
5704}
5705
5706static ssize_t
5707tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5708 struct pipe_inode_info *pipe, size_t len,
5709 unsigned int flags)
5710{
5711 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5712 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5713 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5714 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5715 struct splice_pipe_desc spd = {
35f3d14d
JA
5716 .pages = pages_def,
5717 .partial = partial_def,
047fe360 5718 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5719 .flags = flags,
5720 .ops = &buffer_pipe_buf_ops,
5721 .spd_release = buffer_spd_release,
5722 };
5723 struct buffer_ref *ref;
93459c6c 5724 int entries, size, i;
07906da7 5725 ssize_t ret = 0;
2cadf913 5726
6de58e62 5727#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5728 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5729 return -EBUSY;
6de58e62
SRRH
5730#endif
5731
d716ff71
SRRH
5732 if (splice_grow_spd(pipe, &spd))
5733 return -ENOMEM;
35f3d14d 5734
d716ff71
SRRH
5735 if (*ppos & (PAGE_SIZE - 1))
5736 return -EINVAL;
93cfb3c9
LJ
5737
5738 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5739 if (len < PAGE_SIZE)
5740 return -EINVAL;
93cfb3c9
LJ
5741 len &= PAGE_MASK;
5742 }
5743
cc60cdc9
SR
5744 again:
5745 trace_access_lock(iter->cpu_file);
12883efb 5746 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5747
a786c06d 5748 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5749 struct page *page;
5750 int r;
5751
5752 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5753 if (!ref) {
5754 ret = -ENOMEM;
2cadf913 5755 break;
07906da7 5756 }
2cadf913 5757
7267fa68 5758 ref->ref = 1;
12883efb 5759 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5760 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5761 if (!ref->page) {
07906da7 5762 ret = -ENOMEM;
2cadf913
SR
5763 kfree(ref);
5764 break;
5765 }
5766
5767 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5768 len, iter->cpu_file, 1);
2cadf913 5769 if (r < 0) {
7ea59064 5770 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5771 kfree(ref);
5772 break;
5773 }
5774
5775 /*
5776 * zero out any left over data, this is going to
5777 * user land.
5778 */
5779 size = ring_buffer_page_len(ref->page);
5780 if (size < PAGE_SIZE)
5781 memset(ref->page + size, 0, PAGE_SIZE - size);
5782
5783 page = virt_to_page(ref->page);
5784
5785 spd.pages[i] = page;
5786 spd.partial[i].len = PAGE_SIZE;
5787 spd.partial[i].offset = 0;
5788 spd.partial[i].private = (unsigned long)ref;
5789 spd.nr_pages++;
93cfb3c9 5790 *ppos += PAGE_SIZE;
93459c6c 5791
12883efb 5792 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5793 }
5794
cc60cdc9 5795 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5796 spd.nr_pages = i;
5797
5798 /* did we read anything? */
5799 if (!spd.nr_pages) {
07906da7 5800 if (ret)
d716ff71
SRRH
5801 return ret;
5802
5803 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5804 return -EAGAIN;
07906da7 5805
e30f53aa 5806 ret = wait_on_pipe(iter, true);
8b8b3683 5807 if (ret)
d716ff71 5808 return ret;
e30f53aa 5809
cc60cdc9 5810 goto again;
2cadf913
SR
5811 }
5812
5813 ret = splice_to_pipe(pipe, &spd);
047fe360 5814 splice_shrink_spd(&spd);
6de58e62 5815
2cadf913
SR
5816 return ret;
5817}
5818
5819static const struct file_operations tracing_buffers_fops = {
5820 .open = tracing_buffers_open,
5821 .read = tracing_buffers_read,
cc60cdc9 5822 .poll = tracing_buffers_poll,
2cadf913
SR
5823 .release = tracing_buffers_release,
5824 .splice_read = tracing_buffers_splice_read,
5825 .llseek = no_llseek,
5826};
5827
c8d77183
SR
5828static ssize_t
5829tracing_stats_read(struct file *filp, char __user *ubuf,
5830 size_t count, loff_t *ppos)
5831{
4d3435b8
ON
5832 struct inode *inode = file_inode(filp);
5833 struct trace_array *tr = inode->i_private;
12883efb 5834 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5835 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5836 struct trace_seq *s;
5837 unsigned long cnt;
c64e148a
VN
5838 unsigned long long t;
5839 unsigned long usec_rem;
c8d77183 5840
e4f2d10f 5841 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5842 if (!s)
a646365c 5843 return -ENOMEM;
c8d77183
SR
5844
5845 trace_seq_init(s);
5846
12883efb 5847 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5848 trace_seq_printf(s, "entries: %ld\n", cnt);
5849
12883efb 5850 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5851 trace_seq_printf(s, "overrun: %ld\n", cnt);
5852
12883efb 5853 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5854 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5855
12883efb 5856 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5857 trace_seq_printf(s, "bytes: %ld\n", cnt);
5858
58e8eedf 5859 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5860 /* local or global for trace_clock */
12883efb 5861 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5862 usec_rem = do_div(t, USEC_PER_SEC);
5863 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5864 t, usec_rem);
5865
12883efb 5866 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5867 usec_rem = do_div(t, USEC_PER_SEC);
5868 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5869 } else {
5870 /* counter or tsc mode for trace_clock */
5871 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5872 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5873
11043d8b 5874 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5875 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5876 }
c64e148a 5877
12883efb 5878 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5879 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5880
12883efb 5881 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5882 trace_seq_printf(s, "read events: %ld\n", cnt);
5883
5ac48378
SRRH
5884 count = simple_read_from_buffer(ubuf, count, ppos,
5885 s->buffer, trace_seq_used(s));
c8d77183
SR
5886
5887 kfree(s);
5888
5889 return count;
5890}
5891
5892static const struct file_operations tracing_stats_fops = {
4d3435b8 5893 .open = tracing_open_generic_tr,
c8d77183 5894 .read = tracing_stats_read,
b444786f 5895 .llseek = generic_file_llseek,
4d3435b8 5896 .release = tracing_release_generic_tr,
c8d77183
SR
5897};
5898
bc0c38d1
SR
5899#ifdef CONFIG_DYNAMIC_FTRACE
5900
b807c3d0
SR
5901int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5902{
5903 return 0;
5904}
5905
bc0c38d1 5906static ssize_t
b807c3d0 5907tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5908 size_t cnt, loff_t *ppos)
5909{
a26a2a27
SR
5910 static char ftrace_dyn_info_buffer[1024];
5911 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5912 unsigned long *p = filp->private_data;
b807c3d0 5913 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5914 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5915 int r;
5916
b807c3d0
SR
5917 mutex_lock(&dyn_info_mutex);
5918 r = sprintf(buf, "%ld ", *p);
4bf39a94 5919
a26a2a27 5920 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5921 buf[r++] = '\n';
5922
5923 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5924
5925 mutex_unlock(&dyn_info_mutex);
5926
5927 return r;
bc0c38d1
SR
5928}
5929
5e2336a0 5930static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5931 .open = tracing_open_generic,
b807c3d0 5932 .read = tracing_read_dyn_info,
b444786f 5933 .llseek = generic_file_llseek,
bc0c38d1 5934};
77fd5c15 5935#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5936
77fd5c15
SRRH
5937#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5938static void
5939ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5940{
5941 tracing_snapshot();
5942}
bc0c38d1 5943
77fd5c15
SRRH
5944static void
5945ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5946{
77fd5c15
SRRH
5947 unsigned long *count = (long *)data;
5948
5949 if (!*count)
5950 return;
bc0c38d1 5951
77fd5c15
SRRH
5952 if (*count != -1)
5953 (*count)--;
5954
5955 tracing_snapshot();
5956}
5957
5958static int
5959ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5960 struct ftrace_probe_ops *ops, void *data)
5961{
5962 long count = (long)data;
5963
5964 seq_printf(m, "%ps:", (void *)ip);
5965
fa6f0cc7 5966 seq_puts(m, "snapshot");
77fd5c15
SRRH
5967
5968 if (count == -1)
fa6f0cc7 5969 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5970 else
5971 seq_printf(m, ":count=%ld\n", count);
5972
5973 return 0;
5974}
5975
5976static struct ftrace_probe_ops snapshot_probe_ops = {
5977 .func = ftrace_snapshot,
5978 .print = ftrace_snapshot_print,
5979};
5980
5981static struct ftrace_probe_ops snapshot_count_probe_ops = {
5982 .func = ftrace_count_snapshot,
5983 .print = ftrace_snapshot_print,
5984};
5985
5986static int
5987ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5988 char *glob, char *cmd, char *param, int enable)
5989{
5990 struct ftrace_probe_ops *ops;
5991 void *count = (void *)-1;
5992 char *number;
5993 int ret;
5994
5995 /* hash funcs only work with set_ftrace_filter */
5996 if (!enable)
5997 return -EINVAL;
5998
5999 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6000
6001 if (glob[0] == '!') {
6002 unregister_ftrace_function_probe_func(glob+1, ops);
6003 return 0;
6004 }
6005
6006 if (!param)
6007 goto out_reg;
6008
6009 number = strsep(&param, ":");
6010
6011 if (!strlen(number))
6012 goto out_reg;
6013
6014 /*
6015 * We use the callback data field (which is a pointer)
6016 * as our counter.
6017 */
6018 ret = kstrtoul(number, 0, (unsigned long *)&count);
6019 if (ret)
6020 return ret;
6021
6022 out_reg:
6023 ret = register_ftrace_function_probe(glob, ops, count);
6024
6025 if (ret >= 0)
6026 alloc_snapshot(&global_trace);
6027
6028 return ret < 0 ? ret : 0;
6029}
6030
6031static struct ftrace_func_command ftrace_snapshot_cmd = {
6032 .name = "snapshot",
6033 .func = ftrace_trace_snapshot_callback,
6034};
6035
38de93ab 6036static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6037{
6038 return register_ftrace_command(&ftrace_snapshot_cmd);
6039}
6040#else
38de93ab 6041static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6042#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6043
7eeafbca 6044static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6045{
8434dc93
SRRH
6046 if (WARN_ON(!tr->dir))
6047 return ERR_PTR(-ENODEV);
6048
6049 /* Top directory uses NULL as the parent */
6050 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6051 return NULL;
6052
6053 /* All sub buffers have a descriptor */
2b6080f2 6054 return tr->dir;
bc0c38d1
SR
6055}
6056
2b6080f2 6057static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6058{
b04cc6b1
FW
6059 struct dentry *d_tracer;
6060
2b6080f2
SR
6061 if (tr->percpu_dir)
6062 return tr->percpu_dir;
b04cc6b1 6063
7eeafbca 6064 d_tracer = tracing_get_dentry(tr);
14a5ae40 6065 if (IS_ERR(d_tracer))
b04cc6b1
FW
6066 return NULL;
6067
8434dc93 6068 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6069
2b6080f2 6070 WARN_ONCE(!tr->percpu_dir,
8434dc93 6071 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6072
2b6080f2 6073 return tr->percpu_dir;
b04cc6b1
FW
6074}
6075
649e9c70
ON
6076static struct dentry *
6077trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6078 void *data, long cpu, const struct file_operations *fops)
6079{
6080 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6081
6082 if (ret) /* See tracing_get_cpu() */
7682c918 6083 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6084 return ret;
6085}
6086
2b6080f2 6087static void
8434dc93 6088tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6089{
2b6080f2 6090 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6091 struct dentry *d_cpu;
dd49a38c 6092 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6093
0a3d7ce7
NK
6094 if (!d_percpu)
6095 return;
6096
dd49a38c 6097 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6098 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6099 if (!d_cpu) {
8434dc93 6100 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6101 return;
6102 }
b04cc6b1 6103
8656e7a2 6104 /* per cpu trace_pipe */
649e9c70 6105 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6106 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6107
6108 /* per cpu trace */
649e9c70 6109 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6110 tr, cpu, &tracing_fops);
7f96f93f 6111
649e9c70 6112 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6113 tr, cpu, &tracing_buffers_fops);
7f96f93f 6114
649e9c70 6115 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6116 tr, cpu, &tracing_stats_fops);
438ced17 6117
649e9c70 6118 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6119 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6120
6121#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6122 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6123 tr, cpu, &snapshot_fops);
6de58e62 6124
649e9c70 6125 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6126 tr, cpu, &snapshot_raw_fops);
f1affcaa 6127#endif
b04cc6b1
FW
6128}
6129
60a11774
SR
6130#ifdef CONFIG_FTRACE_SELFTEST
6131/* Let selftest have access to static functions in this file */
6132#include "trace_selftest.c"
6133#endif
6134
577b785f
SR
6135struct trace_option_dentry {
6136 struct tracer_opt *opt;
6137 struct tracer_flags *flags;
2b6080f2 6138 struct trace_array *tr;
577b785f
SR
6139 struct dentry *entry;
6140};
6141
6142static ssize_t
6143trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6144 loff_t *ppos)
6145{
6146 struct trace_option_dentry *topt = filp->private_data;
6147 char *buf;
6148
6149 if (topt->flags->val & topt->opt->bit)
6150 buf = "1\n";
6151 else
6152 buf = "0\n";
6153
6154 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6155}
6156
6157static ssize_t
6158trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6159 loff_t *ppos)
6160{
6161 struct trace_option_dentry *topt = filp->private_data;
6162 unsigned long val;
577b785f
SR
6163 int ret;
6164
22fe9b54
PH
6165 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6166 if (ret)
577b785f
SR
6167 return ret;
6168
8d18eaaf
LZ
6169 if (val != 0 && val != 1)
6170 return -EINVAL;
577b785f 6171
8d18eaaf 6172 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6173 mutex_lock(&trace_types_lock);
8c1a49ae 6174 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6175 topt->opt, !val);
577b785f
SR
6176 mutex_unlock(&trace_types_lock);
6177 if (ret)
6178 return ret;
577b785f
SR
6179 }
6180
6181 *ppos += cnt;
6182
6183 return cnt;
6184}
6185
6186
6187static const struct file_operations trace_options_fops = {
6188 .open = tracing_open_generic,
6189 .read = trace_options_read,
6190 .write = trace_options_write,
b444786f 6191 .llseek = generic_file_llseek,
577b785f
SR
6192};
6193
a8259075
SR
6194static ssize_t
6195trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6196 loff_t *ppos)
6197{
6198 long index = (long)filp->private_data;
6199 char *buf;
6200
6201 if (trace_flags & (1 << index))
6202 buf = "1\n";
6203 else
6204 buf = "0\n";
6205
6206 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6207}
6208
6209static ssize_t
6210trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6211 loff_t *ppos)
6212{
2b6080f2 6213 struct trace_array *tr = &global_trace;
a8259075 6214 long index = (long)filp->private_data;
a8259075
SR
6215 unsigned long val;
6216 int ret;
6217
22fe9b54
PH
6218 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6219 if (ret)
a8259075
SR
6220 return ret;
6221
f2d84b65 6222 if (val != 0 && val != 1)
a8259075 6223 return -EINVAL;
69d34da2
SRRH
6224
6225 mutex_lock(&trace_types_lock);
2b6080f2 6226 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6227 mutex_unlock(&trace_types_lock);
a8259075 6228
613f04a0
SRRH
6229 if (ret < 0)
6230 return ret;
6231
a8259075
SR
6232 *ppos += cnt;
6233
6234 return cnt;
6235}
6236
a8259075
SR
6237static const struct file_operations trace_options_core_fops = {
6238 .open = tracing_open_generic,
6239 .read = trace_options_core_read,
6240 .write = trace_options_core_write,
b444786f 6241 .llseek = generic_file_llseek,
a8259075
SR
6242};
6243
5452af66 6244struct dentry *trace_create_file(const char *name,
f4ae40a6 6245 umode_t mode,
5452af66
FW
6246 struct dentry *parent,
6247 void *data,
6248 const struct file_operations *fops)
6249{
6250 struct dentry *ret;
6251
8434dc93 6252 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6253 if (!ret)
8434dc93 6254 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6255
6256 return ret;
6257}
6258
6259
2b6080f2 6260static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6261{
6262 struct dentry *d_tracer;
a8259075 6263
2b6080f2
SR
6264 if (tr->options)
6265 return tr->options;
a8259075 6266
7eeafbca 6267 d_tracer = tracing_get_dentry(tr);
14a5ae40 6268 if (IS_ERR(d_tracer))
a8259075
SR
6269 return NULL;
6270
8434dc93 6271 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6272 if (!tr->options) {
8434dc93 6273 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6274 return NULL;
6275 }
6276
2b6080f2 6277 return tr->options;
a8259075
SR
6278}
6279
577b785f 6280static void
2b6080f2
SR
6281create_trace_option_file(struct trace_array *tr,
6282 struct trace_option_dentry *topt,
577b785f
SR
6283 struct tracer_flags *flags,
6284 struct tracer_opt *opt)
6285{
6286 struct dentry *t_options;
577b785f 6287
2b6080f2 6288 t_options = trace_options_init_dentry(tr);
577b785f
SR
6289 if (!t_options)
6290 return;
6291
6292 topt->flags = flags;
6293 topt->opt = opt;
2b6080f2 6294 topt->tr = tr;
577b785f 6295
5452af66 6296 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6297 &trace_options_fops);
6298
577b785f
SR
6299}
6300
6301static struct trace_option_dentry *
2b6080f2 6302create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6303{
6304 struct trace_option_dentry *topts;
6305 struct tracer_flags *flags;
6306 struct tracer_opt *opts;
6307 int cnt;
6308
6309 if (!tracer)
6310 return NULL;
6311
6312 flags = tracer->flags;
6313
6314 if (!flags || !flags->opts)
6315 return NULL;
6316
6317 opts = flags->opts;
6318
6319 for (cnt = 0; opts[cnt].name; cnt++)
6320 ;
6321
0cfe8245 6322 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6323 if (!topts)
6324 return NULL;
6325
6326 for (cnt = 0; opts[cnt].name; cnt++)
2b6080f2 6327 create_trace_option_file(tr, &topts[cnt], flags,
577b785f
SR
6328 &opts[cnt]);
6329
6330 return topts;
6331}
6332
6333static void
6334destroy_trace_option_files(struct trace_option_dentry *topts)
6335{
6336 int cnt;
6337
6338 if (!topts)
6339 return;
6340
3f4d8f78 6341 for (cnt = 0; topts[cnt].opt; cnt++)
8434dc93 6342 tracefs_remove(topts[cnt].entry);
577b785f
SR
6343
6344 kfree(topts);
6345}
6346
a8259075 6347static struct dentry *
2b6080f2
SR
6348create_trace_option_core_file(struct trace_array *tr,
6349 const char *option, long index)
a8259075
SR
6350{
6351 struct dentry *t_options;
a8259075 6352
2b6080f2 6353 t_options = trace_options_init_dentry(tr);
a8259075
SR
6354 if (!t_options)
6355 return NULL;
6356
5452af66 6357 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 6358 &trace_options_core_fops);
a8259075
SR
6359}
6360
2b6080f2 6361static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6362{
6363 struct dentry *t_options;
a8259075
SR
6364 int i;
6365
2b6080f2 6366 t_options = trace_options_init_dentry(tr);
a8259075
SR
6367 if (!t_options)
6368 return;
6369
5452af66 6370 for (i = 0; trace_options[i]; i++)
2b6080f2 6371 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6372}
6373
499e5470
SR
6374static ssize_t
6375rb_simple_read(struct file *filp, char __user *ubuf,
6376 size_t cnt, loff_t *ppos)
6377{
348f0fc2 6378 struct trace_array *tr = filp->private_data;
499e5470
SR
6379 char buf[64];
6380 int r;
6381
10246fa3 6382 r = tracer_tracing_is_on(tr);
499e5470
SR
6383 r = sprintf(buf, "%d\n", r);
6384
6385 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6386}
6387
6388static ssize_t
6389rb_simple_write(struct file *filp, const char __user *ubuf,
6390 size_t cnt, loff_t *ppos)
6391{
348f0fc2 6392 struct trace_array *tr = filp->private_data;
12883efb 6393 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6394 unsigned long val;
6395 int ret;
6396
6397 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6398 if (ret)
6399 return ret;
6400
6401 if (buffer) {
2df8f8a6
SR
6402 mutex_lock(&trace_types_lock);
6403 if (val) {
10246fa3 6404 tracer_tracing_on(tr);
2b6080f2
SR
6405 if (tr->current_trace->start)
6406 tr->current_trace->start(tr);
2df8f8a6 6407 } else {
10246fa3 6408 tracer_tracing_off(tr);
2b6080f2
SR
6409 if (tr->current_trace->stop)
6410 tr->current_trace->stop(tr);
2df8f8a6
SR
6411 }
6412 mutex_unlock(&trace_types_lock);
499e5470
SR
6413 }
6414
6415 (*ppos)++;
6416
6417 return cnt;
6418}
6419
6420static const struct file_operations rb_simple_fops = {
7b85af63 6421 .open = tracing_open_generic_tr,
499e5470
SR
6422 .read = rb_simple_read,
6423 .write = rb_simple_write,
7b85af63 6424 .release = tracing_release_generic_tr,
499e5470
SR
6425 .llseek = default_llseek,
6426};
6427
277ba044
SR
6428struct dentry *trace_instance_dir;
6429
6430static void
8434dc93 6431init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6432
55034cd6
SRRH
6433static int
6434allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6435{
6436 enum ring_buffer_flags rb_flags;
737223fb
SRRH
6437
6438 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6439
dced341b
SRRH
6440 buf->tr = tr;
6441
55034cd6
SRRH
6442 buf->buffer = ring_buffer_alloc(size, rb_flags);
6443 if (!buf->buffer)
6444 return -ENOMEM;
737223fb 6445
55034cd6
SRRH
6446 buf->data = alloc_percpu(struct trace_array_cpu);
6447 if (!buf->data) {
6448 ring_buffer_free(buf->buffer);
6449 return -ENOMEM;
6450 }
737223fb 6451
737223fb
SRRH
6452 /* Allocate the first page for all buffers */
6453 set_buffer_entries(&tr->trace_buffer,
6454 ring_buffer_size(tr->trace_buffer.buffer, 0));
6455
55034cd6
SRRH
6456 return 0;
6457}
737223fb 6458
55034cd6
SRRH
6459static int allocate_trace_buffers(struct trace_array *tr, int size)
6460{
6461 int ret;
737223fb 6462
55034cd6
SRRH
6463 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6464 if (ret)
6465 return ret;
737223fb 6466
55034cd6
SRRH
6467#ifdef CONFIG_TRACER_MAX_TRACE
6468 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6469 allocate_snapshot ? size : 1);
6470 if (WARN_ON(ret)) {
737223fb 6471 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6472 free_percpu(tr->trace_buffer.data);
6473 return -ENOMEM;
6474 }
6475 tr->allocated_snapshot = allocate_snapshot;
737223fb 6476
55034cd6
SRRH
6477 /*
6478 * Only the top level trace array gets its snapshot allocated
6479 * from the kernel command line.
6480 */
6481 allocate_snapshot = false;
737223fb 6482#endif
55034cd6 6483 return 0;
737223fb
SRRH
6484}
6485
f0b70cc4
SRRH
6486static void free_trace_buffer(struct trace_buffer *buf)
6487{
6488 if (buf->buffer) {
6489 ring_buffer_free(buf->buffer);
6490 buf->buffer = NULL;
6491 free_percpu(buf->data);
6492 buf->data = NULL;
6493 }
6494}
6495
23aaa3c1
SRRH
6496static void free_trace_buffers(struct trace_array *tr)
6497{
6498 if (!tr)
6499 return;
6500
f0b70cc4 6501 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6502
6503#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6504 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6505#endif
6506}
6507
eae47358 6508static int instance_mkdir(const char *name)
737223fb 6509{
277ba044
SR
6510 struct trace_array *tr;
6511 int ret;
277ba044
SR
6512
6513 mutex_lock(&trace_types_lock);
6514
6515 ret = -EEXIST;
6516 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6517 if (tr->name && strcmp(tr->name, name) == 0)
6518 goto out_unlock;
6519 }
6520
6521 ret = -ENOMEM;
6522 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6523 if (!tr)
6524 goto out_unlock;
6525
6526 tr->name = kstrdup(name, GFP_KERNEL);
6527 if (!tr->name)
6528 goto out_free_tr;
6529
ccfe9e42
AL
6530 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6531 goto out_free_tr;
6532
6533 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6534
277ba044
SR
6535 raw_spin_lock_init(&tr->start_lock);
6536
0b9b12c1
SRRH
6537 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6538
277ba044
SR
6539 tr->current_trace = &nop_trace;
6540
6541 INIT_LIST_HEAD(&tr->systems);
6542 INIT_LIST_HEAD(&tr->events);
6543
737223fb 6544 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6545 goto out_free_tr;
6546
8434dc93 6547 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6548 if (!tr->dir)
6549 goto out_free_tr;
6550
6551 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6552 if (ret) {
8434dc93 6553 tracefs_remove_recursive(tr->dir);
277ba044 6554 goto out_free_tr;
609e85a7 6555 }
277ba044 6556
8434dc93 6557 init_tracer_tracefs(tr, tr->dir);
277ba044
SR
6558
6559 list_add(&tr->list, &ftrace_trace_arrays);
6560
6561 mutex_unlock(&trace_types_lock);
6562
6563 return 0;
6564
6565 out_free_tr:
23aaa3c1 6566 free_trace_buffers(tr);
ccfe9e42 6567 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6568 kfree(tr->name);
6569 kfree(tr);
6570
6571 out_unlock:
6572 mutex_unlock(&trace_types_lock);
6573
6574 return ret;
6575
6576}
6577
eae47358 6578static int instance_rmdir(const char *name)
0c8916c3
SR
6579{
6580 struct trace_array *tr;
6581 int found = 0;
6582 int ret;
6583
6584 mutex_lock(&trace_types_lock);
6585
6586 ret = -ENODEV;
6587 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6588 if (tr->name && strcmp(tr->name, name) == 0) {
6589 found = 1;
6590 break;
6591 }
6592 }
6593 if (!found)
6594 goto out_unlock;
6595
a695cb58 6596 ret = -EBUSY;
cf6ab6d9 6597 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6598 goto out_unlock;
6599
0c8916c3
SR
6600 list_del(&tr->list);
6601
6b450d25 6602 tracing_set_nop(tr);
0c8916c3 6603 event_trace_del_tracer(tr);
591dffda 6604 ftrace_destroy_function_files(tr);
0c8916c3 6605 debugfs_remove_recursive(tr->dir);
a9fcaaac 6606 free_trace_buffers(tr);
0c8916c3
SR
6607
6608 kfree(tr->name);
6609 kfree(tr);
6610
6611 ret = 0;
6612
6613 out_unlock:
6614 mutex_unlock(&trace_types_lock);
6615
6616 return ret;
6617}
6618
277ba044
SR
6619static __init void create_trace_instances(struct dentry *d_tracer)
6620{
eae47358
SRRH
6621 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6622 instance_mkdir,
6623 instance_rmdir);
277ba044
SR
6624 if (WARN_ON(!trace_instance_dir))
6625 return;
277ba044
SR
6626}
6627
2b6080f2 6628static void
8434dc93 6629init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6630{
121aaee7 6631 int cpu;
2b6080f2 6632
607e2ea1
SRRH
6633 trace_create_file("available_tracers", 0444, d_tracer,
6634 tr, &show_traces_fops);
6635
6636 trace_create_file("current_tracer", 0644, d_tracer,
6637 tr, &set_tracer_fops);
6638
ccfe9e42
AL
6639 trace_create_file("tracing_cpumask", 0644, d_tracer,
6640 tr, &tracing_cpumask_fops);
6641
2b6080f2
SR
6642 trace_create_file("trace_options", 0644, d_tracer,
6643 tr, &tracing_iter_fops);
6644
6645 trace_create_file("trace", 0644, d_tracer,
6484c71c 6646 tr, &tracing_fops);
2b6080f2
SR
6647
6648 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6649 tr, &tracing_pipe_fops);
2b6080f2
SR
6650
6651 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6652 tr, &tracing_entries_fops);
2b6080f2
SR
6653
6654 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6655 tr, &tracing_total_entries_fops);
6656
238ae93d 6657 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6658 tr, &tracing_free_buffer_fops);
6659
6660 trace_create_file("trace_marker", 0220, d_tracer,
6661 tr, &tracing_mark_fops);
6662
6663 trace_create_file("trace_clock", 0644, d_tracer, tr,
6664 &trace_clock_fops);
6665
6666 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6667 tr, &rb_simple_fops);
ce9bae55 6668
6d9b3fa5
SRRH
6669#ifdef CONFIG_TRACER_MAX_TRACE
6670 trace_create_file("tracing_max_latency", 0644, d_tracer,
6671 &tr->max_latency, &tracing_max_lat_fops);
6672#endif
6673
591dffda
SRRH
6674 if (ftrace_create_function_files(tr, d_tracer))
6675 WARN(1, "Could not allocate function filter files");
6676
ce9bae55
SRRH
6677#ifdef CONFIG_TRACER_SNAPSHOT
6678 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6679 tr, &snapshot_fops);
ce9bae55 6680#endif
121aaee7
SRRH
6681
6682 for_each_tracing_cpu(cpu)
8434dc93 6683 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6684
2b6080f2
SR
6685}
6686
f76180bc
SRRH
6687static struct vfsmount *trace_automount(void *ingore)
6688{
6689 struct vfsmount *mnt;
6690 struct file_system_type *type;
6691
6692 /*
6693 * To maintain backward compatibility for tools that mount
6694 * debugfs to get to the tracing facility, tracefs is automatically
6695 * mounted to the debugfs/tracing directory.
6696 */
6697 type = get_fs_type("tracefs");
6698 if (!type)
6699 return NULL;
6700 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6701 put_filesystem(type);
6702 if (IS_ERR(mnt))
6703 return NULL;
6704 mntget(mnt);
6705
6706 return mnt;
6707}
6708
7eeafbca
SRRH
6709/**
6710 * tracing_init_dentry - initialize top level trace array
6711 *
6712 * This is called when creating files or directories in the tracing
6713 * directory. It is called via fs_initcall() by any of the boot up code
6714 * and expects to return the dentry of the top level tracing directory.
6715 */
6716struct dentry *tracing_init_dentry(void)
6717{
6718 struct trace_array *tr = &global_trace;
6719
f76180bc 6720 /* The top level trace array uses NULL as parent */
7eeafbca 6721 if (tr->dir)
f76180bc 6722 return NULL;
7eeafbca
SRRH
6723
6724 if (WARN_ON(!debugfs_initialized()))
6725 return ERR_PTR(-ENODEV);
6726
f76180bc
SRRH
6727 /*
6728 * As there may still be users that expect the tracing
6729 * files to exist in debugfs/tracing, we must automount
6730 * the tracefs file system there, so older tools still
6731 * work with the newer kerenl.
6732 */
6733 tr->dir = debugfs_create_automount("tracing", NULL,
6734 trace_automount, NULL);
7eeafbca
SRRH
6735 if (!tr->dir) {
6736 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6737 return ERR_PTR(-ENOMEM);
6738 }
6739
8434dc93 6740 return NULL;
7eeafbca
SRRH
6741}
6742
0c564a53
SRRH
6743extern struct trace_enum_map *__start_ftrace_enum_maps[];
6744extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6745
6746static void __init trace_enum_init(void)
6747{
3673b8e4
SRRH
6748 int len;
6749
6750 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6751 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6752}
6753
6754#ifdef CONFIG_MODULES
6755static void trace_module_add_enums(struct module *mod)
6756{
6757 if (!mod->num_trace_enums)
6758 return;
6759
6760 /*
6761 * Modules with bad taint do not have events created, do
6762 * not bother with enums either.
6763 */
6764 if (trace_module_has_bad_taint(mod))
6765 return;
6766
9828413d 6767 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6768}
6769
9828413d
SRRH
6770#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6771static void trace_module_remove_enums(struct module *mod)
6772{
6773 union trace_enum_map_item *map;
6774 union trace_enum_map_item **last = &trace_enum_maps;
6775
6776 if (!mod->num_trace_enums)
6777 return;
6778
6779 mutex_lock(&trace_enum_mutex);
6780
6781 map = trace_enum_maps;
6782
6783 while (map) {
6784 if (map->head.mod == mod)
6785 break;
6786 map = trace_enum_jmp_to_tail(map);
6787 last = &map->tail.next;
6788 map = map->tail.next;
6789 }
6790 if (!map)
6791 goto out;
6792
6793 *last = trace_enum_jmp_to_tail(map)->tail.next;
6794 kfree(map);
6795 out:
6796 mutex_unlock(&trace_enum_mutex);
6797}
6798#else
6799static inline void trace_module_remove_enums(struct module *mod) { }
6800#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6801
3673b8e4
SRRH
6802static int trace_module_notify(struct notifier_block *self,
6803 unsigned long val, void *data)
6804{
6805 struct module *mod = data;
6806
6807 switch (val) {
6808 case MODULE_STATE_COMING:
6809 trace_module_add_enums(mod);
6810 break;
9828413d
SRRH
6811 case MODULE_STATE_GOING:
6812 trace_module_remove_enums(mod);
6813 break;
3673b8e4
SRRH
6814 }
6815
6816 return 0;
0c564a53
SRRH
6817}
6818
3673b8e4
SRRH
6819static struct notifier_block trace_module_nb = {
6820 .notifier_call = trace_module_notify,
6821 .priority = 0,
6822};
9828413d 6823#endif /* CONFIG_MODULES */
3673b8e4 6824
8434dc93 6825static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6826{
6827 struct dentry *d_tracer;
bc0c38d1 6828
7e53bd42
LJ
6829 trace_access_lock_init();
6830
bc0c38d1 6831 d_tracer = tracing_init_dentry();
14a5ae40 6832 if (IS_ERR(d_tracer))
ed6f1c99 6833 return 0;
bc0c38d1 6834
8434dc93 6835 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6836
5452af66 6837 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6838 &global_trace, &tracing_thresh_fops);
a8259075 6839
339ae5d3 6840 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6841 NULL, &tracing_readme_fops);
6842
69abe6a5
AP
6843 trace_create_file("saved_cmdlines", 0444, d_tracer,
6844 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6845
939c7a4f
YY
6846 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6847 NULL, &tracing_saved_cmdlines_size_fops);
6848
0c564a53
SRRH
6849 trace_enum_init();
6850
9828413d
SRRH
6851 trace_create_enum_file(d_tracer);
6852
3673b8e4
SRRH
6853#ifdef CONFIG_MODULES
6854 register_module_notifier(&trace_module_nb);
6855#endif
6856
bc0c38d1 6857#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6858 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6859 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6860#endif
b04cc6b1 6861
277ba044 6862 create_trace_instances(d_tracer);
5452af66 6863
2b6080f2 6864 create_trace_options_dir(&global_trace);
b04cc6b1 6865
09d23a1d
SRRH
6866 /* If the tracer was started via cmdline, create options for it here */
6867 if (global_trace.current_trace != &nop_trace)
6868 update_tracer_options(&global_trace, global_trace.current_trace);
6869
b5ad384e 6870 return 0;
bc0c38d1
SR
6871}
6872
3f5a54e3
SR
6873static int trace_panic_handler(struct notifier_block *this,
6874 unsigned long event, void *unused)
6875{
944ac425 6876 if (ftrace_dump_on_oops)
cecbca96 6877 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6878 return NOTIFY_OK;
6879}
6880
6881static struct notifier_block trace_panic_notifier = {
6882 .notifier_call = trace_panic_handler,
6883 .next = NULL,
6884 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6885};
6886
6887static int trace_die_handler(struct notifier_block *self,
6888 unsigned long val,
6889 void *data)
6890{
6891 switch (val) {
6892 case DIE_OOPS:
944ac425 6893 if (ftrace_dump_on_oops)
cecbca96 6894 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6895 break;
6896 default:
6897 break;
6898 }
6899 return NOTIFY_OK;
6900}
6901
6902static struct notifier_block trace_die_notifier = {
6903 .notifier_call = trace_die_handler,
6904 .priority = 200
6905};
6906
6907/*
6908 * printk is set to max of 1024, we really don't need it that big.
6909 * Nothing should be printing 1000 characters anyway.
6910 */
6911#define TRACE_MAX_PRINT 1000
6912
6913/*
6914 * Define here KERN_TRACE so that we have one place to modify
6915 * it if we decide to change what log level the ftrace dump
6916 * should be at.
6917 */
428aee14 6918#define KERN_TRACE KERN_EMERG
3f5a54e3 6919
955b61e5 6920void
3f5a54e3
SR
6921trace_printk_seq(struct trace_seq *s)
6922{
6923 /* Probably should print a warning here. */
3a161d99
SRRH
6924 if (s->seq.len >= TRACE_MAX_PRINT)
6925 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6926
820b75f6
SRRH
6927 /*
6928 * More paranoid code. Although the buffer size is set to
6929 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6930 * an extra layer of protection.
6931 */
6932 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6933 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6934
6935 /* should be zero ended, but we are paranoid. */
3a161d99 6936 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6937
6938 printk(KERN_TRACE "%s", s->buffer);
6939
f9520750 6940 trace_seq_init(s);
3f5a54e3
SR
6941}
6942
955b61e5
JW
6943void trace_init_global_iter(struct trace_iterator *iter)
6944{
6945 iter->tr = &global_trace;
2b6080f2 6946 iter->trace = iter->tr->current_trace;
ae3b5093 6947 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6948 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6949
6950 if (iter->trace && iter->trace->open)
6951 iter->trace->open(iter);
6952
6953 /* Annotate start of buffers if we had overruns */
6954 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6955 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6956
6957 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6958 if (trace_clocks[iter->tr->clock_id].in_ns)
6959 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
6960}
6961
7fe70b57 6962void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6963{
3f5a54e3
SR
6964 /* use static because iter can be a bit big for the stack */
6965 static struct trace_iterator iter;
7fe70b57 6966 static atomic_t dump_running;
cf586b61 6967 unsigned int old_userobj;
d769041f
SR
6968 unsigned long flags;
6969 int cnt = 0, cpu;
3f5a54e3 6970
7fe70b57
SRRH
6971 /* Only allow one dump user at a time. */
6972 if (atomic_inc_return(&dump_running) != 1) {
6973 atomic_dec(&dump_running);
6974 return;
6975 }
3f5a54e3 6976
7fe70b57
SRRH
6977 /*
6978 * Always turn off tracing when we dump.
6979 * We don't need to show trace output of what happens
6980 * between multiple crashes.
6981 *
6982 * If the user does a sysrq-z, then they can re-enable
6983 * tracing with echo 1 > tracing_on.
6984 */
0ee6b6cf 6985 tracing_off();
cf586b61 6986
7fe70b57 6987 local_irq_save(flags);
3f5a54e3 6988
38dbe0b1 6989 /* Simulate the iterator */
955b61e5
JW
6990 trace_init_global_iter(&iter);
6991
d769041f 6992 for_each_tracing_cpu(cpu) {
12883efb 6993 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
d769041f
SR
6994 }
6995
cf586b61
FW
6996 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6997
b54d3de9
TE
6998 /* don't look at user memory in panic mode */
6999 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7000
cecbca96
FW
7001 switch (oops_dump_mode) {
7002 case DUMP_ALL:
ae3b5093 7003 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7004 break;
7005 case DUMP_ORIG:
7006 iter.cpu_file = raw_smp_processor_id();
7007 break;
7008 case DUMP_NONE:
7009 goto out_enable;
7010 default:
7011 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7012 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7013 }
7014
7015 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7016
7fe70b57
SRRH
7017 /* Did function tracer already get disabled? */
7018 if (ftrace_is_dead()) {
7019 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7020 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7021 }
7022
3f5a54e3
SR
7023 /*
7024 * We need to stop all tracing on all CPUS to read the
7025 * the next buffer. This is a bit expensive, but is
7026 * not done often. We fill all what we can read,
7027 * and then release the locks again.
7028 */
7029
3f5a54e3
SR
7030 while (!trace_empty(&iter)) {
7031
7032 if (!cnt)
7033 printk(KERN_TRACE "---------------------------------\n");
7034
7035 cnt++;
7036
7037 /* reset all but tr, trace, and overruns */
7038 memset(&iter.seq, 0,
7039 sizeof(struct trace_iterator) -
7040 offsetof(struct trace_iterator, seq));
7041 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7042 iter.pos = -1;
7043
955b61e5 7044 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7045 int ret;
7046
7047 ret = print_trace_line(&iter);
7048 if (ret != TRACE_TYPE_NO_CONSUME)
7049 trace_consume(&iter);
3f5a54e3 7050 }
b892e5c8 7051 touch_nmi_watchdog();
3f5a54e3
SR
7052
7053 trace_printk_seq(&iter.seq);
7054 }
7055
7056 if (!cnt)
7057 printk(KERN_TRACE " (ftrace buffer empty)\n");
7058 else
7059 printk(KERN_TRACE "---------------------------------\n");
7060
cecbca96 7061 out_enable:
7fe70b57 7062 trace_flags |= old_userobj;
cf586b61 7063
7fe70b57
SRRH
7064 for_each_tracing_cpu(cpu) {
7065 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7066 }
7fe70b57 7067 atomic_dec(&dump_running);
cd891ae0 7068 local_irq_restore(flags);
3f5a54e3 7069}
a8eecf22 7070EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7071
3928a8a2 7072__init static int tracer_alloc_buffers(void)
bc0c38d1 7073{
73c5162a 7074 int ring_buf_size;
9e01c1b7 7075 int ret = -ENOMEM;
4c11d7ae 7076
9e01c1b7
RR
7077 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7078 goto out;
7079
ccfe9e42 7080 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7081 goto out_free_buffer_mask;
4c11d7ae 7082
07d777fe
SR
7083 /* Only allocate trace_printk buffers if a trace_printk exists */
7084 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7085 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7086 trace_printk_init_buffers();
7087
73c5162a
SR
7088 /* To save memory, keep the ring buffer size to its minimum */
7089 if (ring_buffer_expanded)
7090 ring_buf_size = trace_buf_size;
7091 else
7092 ring_buf_size = 1;
7093
9e01c1b7 7094 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7095 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7096
2b6080f2
SR
7097 raw_spin_lock_init(&global_trace.start_lock);
7098
2c4a33ab
SRRH
7099 /* Used for event triggers */
7100 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7101 if (!temp_buffer)
7102 goto out_free_cpumask;
7103
939c7a4f
YY
7104 if (trace_create_savedcmd() < 0)
7105 goto out_free_temp_buffer;
7106
9e01c1b7 7107 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 7108 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7109 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7110 WARN_ON(1);
939c7a4f 7111 goto out_free_savedcmd;
4c11d7ae 7112 }
a7603ff4 7113
499e5470
SR
7114 if (global_trace.buffer_disabled)
7115 tracing_off();
4c11d7ae 7116
e1e232ca
SR
7117 if (trace_boot_clock) {
7118 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7119 if (ret < 0)
7120 pr_warning("Trace clock %s not defined, going back to default\n",
7121 trace_boot_clock);
7122 }
7123
ca164318
SRRH
7124 /*
7125 * register_tracer() might reference current_trace, so it
7126 * needs to be set before we register anything. This is
7127 * just a bootstrap of current_trace anyway.
7128 */
2b6080f2
SR
7129 global_trace.current_trace = &nop_trace;
7130
0b9b12c1
SRRH
7131 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7132
4104d326
SRRH
7133 ftrace_init_global_array_ops(&global_trace);
7134
ca164318
SRRH
7135 register_tracer(&nop_trace);
7136
60a11774
SR
7137 /* All seems OK, enable tracing */
7138 tracing_disabled = 0;
3928a8a2 7139
3f5a54e3
SR
7140 atomic_notifier_chain_register(&panic_notifier_list,
7141 &trace_panic_notifier);
7142
7143 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7144
ae63b31e
SR
7145 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7146
7147 INIT_LIST_HEAD(&global_trace.systems);
7148 INIT_LIST_HEAD(&global_trace.events);
7149 list_add(&global_trace.list, &ftrace_trace_arrays);
7150
7bcfaf54
SR
7151 while (trace_boot_options) {
7152 char *option;
7153
7154 option = strsep(&trace_boot_options, ",");
2b6080f2 7155 trace_set_options(&global_trace, option);
7bcfaf54
SR
7156 }
7157
77fd5c15
SRRH
7158 register_snapshot_cmd();
7159
2fc1dfbe 7160 return 0;
3f5a54e3 7161
939c7a4f
YY
7162out_free_savedcmd:
7163 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7164out_free_temp_buffer:
7165 ring_buffer_free(temp_buffer);
9e01c1b7 7166out_free_cpumask:
ccfe9e42 7167 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7168out_free_buffer_mask:
7169 free_cpumask_var(tracing_buffer_mask);
7170out:
7171 return ret;
bc0c38d1 7172}
b2821ae6 7173
5f893b26
SRRH
7174void __init trace_init(void)
7175{
0daa2302
SRRH
7176 if (tracepoint_printk) {
7177 tracepoint_print_iter =
7178 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7179 if (WARN_ON(!tracepoint_print_iter))
7180 tracepoint_printk = 0;
7181 }
5f893b26 7182 tracer_alloc_buffers();
0c564a53 7183 trace_event_init();
5f893b26
SRRH
7184}
7185
b2821ae6
SR
7186__init static int clear_boot_tracer(void)
7187{
7188 /*
7189 * The default tracer at boot buffer is an init section.
7190 * This function is called in lateinit. If we did not
7191 * find the boot tracer, then clear it out, to prevent
7192 * later registration from accessing the buffer that is
7193 * about to be freed.
7194 */
7195 if (!default_bootup_tracer)
7196 return 0;
7197
7198 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7199 default_bootup_tracer);
7200 default_bootup_tracer = NULL;
7201
7202 return 0;
7203}
7204
8434dc93 7205fs_initcall(tracer_init_tracefs);
b2821ae6 7206late_initcall(clear_boot_tracer);