]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/trace/trace.c
tracing: Add a method to pass in trace_array descriptor to option files
[mirror_ubuntu-jammy-kernel.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
2cadf913 28#include <linux/kprobes.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
8bd75c77 43#include <linux/sched/rt.h>
86387f7e 44
bc0c38d1 45#include "trace.h"
f0868d1e 46#include "trace_output.h"
bc0c38d1 47
73c5162a
SR
48/*
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
55034cd6 52bool ring_buffer_expanded;
73c5162a 53
8e1b82e0
FW
54/*
55 * We need to change this state when a selftest is running.
ff32504f
FW
56 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
5e1607a0 58 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
59 * at the same time, giving false positive or negative results.
60 */
8e1b82e0 61static bool __read_mostly tracing_selftest_running;
ff32504f 62
b2821ae6
SR
63/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
020e5f85 66bool __read_mostly tracing_selftest_disabled;
b2821ae6 67
0daa2302
SRRH
68/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
adf9f195
FW
72/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static struct tracer_flags dummy_tracer_flags = {
78 .val = 0,
79 .opts = dummy_tracer_opt
80};
81
8c1a49ae
SRRH
82static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
84{
85 return 0;
86}
0f048701 87
7ffbd48d
SR
88/*
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
92 */
93static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
0f048701
SR
95/*
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
100 */
4fd27358 101static int tracing_disabled = 1;
0f048701 102
9288f99a 103DEFINE_PER_CPU(int, ftrace_cpu_disabled);
d769041f 104
955b61e5 105cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 106
944ac425
SR
107/*
108 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
109 *
110 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
111 * is set, then ftrace_dump is called. This will output the contents
112 * of the ftrace buffers to the console. This is very useful for
113 * capturing traces that lead to crashes and outputing it to a
114 * serial console.
115 *
116 * It is default off, but you can enable it with either specifying
117 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
118 * /proc/sys/kernel/ftrace_dump_on_oops
119 * Set 1 if you want to dump buffers of all CPUs
120 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 121 */
cecbca96
FW
122
123enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 124
de7edd31
SRRH
125/* When set, tracing will stop when a WARN*() is hit */
126int __disable_trace_on_warning;
127
9828413d
SRRH
128#ifdef CONFIG_TRACE_ENUM_MAP_FILE
129/* Map of enums to their values, for "enum_map" file */
130struct trace_enum_map_head {
131 struct module *mod;
132 unsigned long length;
133};
134
135union trace_enum_map_item;
136
137struct trace_enum_map_tail {
138 /*
139 * "end" is first and points to NULL as it must be different
140 * than "mod" or "enum_string"
141 */
142 union trace_enum_map_item *next;
143 const char *end; /* points to NULL */
144};
145
146static DEFINE_MUTEX(trace_enum_mutex);
147
148/*
149 * The trace_enum_maps are saved in an array with two extra elements,
150 * one at the beginning, and one at the end. The beginning item contains
151 * the count of the saved maps (head.length), and the module they
152 * belong to if not built in (head.mod). The ending item contains a
153 * pointer to the next array of saved enum_map items.
154 */
155union trace_enum_map_item {
156 struct trace_enum_map map;
157 struct trace_enum_map_head head;
158 struct trace_enum_map_tail tail;
159};
160
161static union trace_enum_map_item *trace_enum_maps;
162#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163
607e2ea1 164static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 165
ee6c2c1b
LZ
166#define MAX_TRACER_SIZE 100
167static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 168static char *default_bootup_tracer;
d9e54076 169
55034cd6
SRRH
170static bool allocate_snapshot;
171
1beee96b 172static int __init set_cmdline_ftrace(char *str)
d9e54076 173{
67012ab1 174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 175 default_bootup_tracer = bootup_tracer_buf;
73c5162a 176 /* We are using ftrace early, expand it */
55034cd6 177 ring_buffer_expanded = true;
d9e54076
PZ
178 return 1;
179}
1beee96b 180__setup("ftrace=", set_cmdline_ftrace);
d9e54076 181
944ac425
SR
182static int __init set_ftrace_dump_on_oops(char *str)
183{
cecbca96
FW
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
186 return 1;
187 }
188
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
191 return 1;
192 }
193
194 return 0;
944ac425
SR
195}
196__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 197
de7edd31
SRRH
198static int __init stop_trace_on_warning(char *str)
199{
933ff9f2
LCG
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
de7edd31
SRRH
202 return 1;
203}
933ff9f2 204__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 205
3209cff4 206static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
207{
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
211 return 1;
212}
3209cff4 213__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 214
7bcfaf54
SR
215
216static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217static char *trace_boot_options __initdata;
218
219static int __init set_trace_boot_options(char *str)
220{
67012ab1 221 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
222 trace_boot_options = trace_boot_options_buf;
223 return 0;
224}
225__setup("trace_options=", set_trace_boot_options);
226
e1e232ca
SR
227static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228static char *trace_boot_clock __initdata;
229
230static int __init set_trace_boot_clock(char *str)
231{
232 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 trace_boot_clock = trace_boot_clock_buf;
234 return 0;
235}
236__setup("trace_clock=", set_trace_boot_clock);
237
0daa2302
SRRH
238static int __init set_tracepoint_printk(char *str)
239{
240 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 tracepoint_printk = 1;
242 return 1;
243}
244__setup("tp_printk", set_tracepoint_printk);
de7edd31 245
cf8e3474 246unsigned long long ns2usecs(cycle_t nsec)
bc0c38d1
SR
247{
248 nsec += 500;
249 do_div(nsec, 1000);
250 return nsec;
251}
252
983f938a
SRRH
253/* trace_flags holds trace_options default values */
254#define TRACE_DEFAULT_FLAGS \
255 (FUNCTION_DEFAULT_FLAGS | \
256 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
257 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
258 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
259 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
260
4fcdae83
SR
261/*
262 * The global_trace is the descriptor that holds the tracing
263 * buffers for the live tracing. For each CPU, it contains
264 * a link list of pages that will store trace entries. The
265 * page descriptor of the pages in the memory is used to hold
266 * the link list by linking the lru item in the page descriptor
267 * to each of the pages in the buffer per CPU.
268 *
269 * For each active CPU there is a data field that holds the
270 * pages for the buffer for that CPU. Each CPU has the same number
271 * of pages allocated for its buffer.
272 */
983f938a
SRRH
273static struct trace_array global_trace = {
274 .trace_flags = TRACE_DEFAULT_FLAGS,
275};
bc0c38d1 276
ae63b31e 277LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 278
ff451961
SRRH
279int trace_array_get(struct trace_array *this_tr)
280{
281 struct trace_array *tr;
282 int ret = -ENODEV;
283
284 mutex_lock(&trace_types_lock);
285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
286 if (tr == this_tr) {
287 tr->ref++;
288 ret = 0;
289 break;
290 }
291 }
292 mutex_unlock(&trace_types_lock);
293
294 return ret;
295}
296
297static void __trace_array_put(struct trace_array *this_tr)
298{
299 WARN_ON(!this_tr->ref);
300 this_tr->ref--;
301}
302
303void trace_array_put(struct trace_array *this_tr)
304{
305 mutex_lock(&trace_types_lock);
306 __trace_array_put(this_tr);
307 mutex_unlock(&trace_types_lock);
308}
309
7f1d2f82 310int filter_check_discard(struct trace_event_file *file, void *rec,
f306cc82
TZ
311 struct ring_buffer *buffer,
312 struct ring_buffer_event *event)
eb02ce01 313{
5d6ad960 314 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
f306cc82
TZ
315 !filter_match_preds(file->filter, rec)) {
316 ring_buffer_discard_commit(buffer, event);
317 return 1;
318 }
319
320 return 0;
321}
322EXPORT_SYMBOL_GPL(filter_check_discard);
323
2425bcb9 324int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
325 struct ring_buffer *buffer,
326 struct ring_buffer_event *event)
327{
328 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
329 !filter_match_preds(call->filter, rec)) {
330 ring_buffer_discard_commit(buffer, event);
331 return 1;
332 }
333
334 return 0;
eb02ce01 335}
f306cc82 336EXPORT_SYMBOL_GPL(call_filter_check_discard);
eb02ce01 337
ad1438a0 338static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
339{
340 u64 ts;
341
342 /* Early boot up does not have a buffer yet */
9457158b 343 if (!buf->buffer)
37886f6a
SR
344 return trace_clock_local();
345
9457158b
AL
346 ts = ring_buffer_time_stamp(buf->buffer, cpu);
347 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
348
349 return ts;
350}
bc0c38d1 351
9457158b
AL
352cycle_t ftrace_now(int cpu)
353{
354 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
355}
356
10246fa3
SRRH
357/**
358 * tracing_is_enabled - Show if global_trace has been disabled
359 *
360 * Shows if the global trace has been enabled or not. It uses the
361 * mirror flag "buffer_disabled" to be used in fast paths such as for
362 * the irqsoff tracer. But it may be inaccurate due to races. If you
363 * need to know the accurate state, use tracing_is_on() which is a little
364 * slower, but accurate.
365 */
9036990d
SR
366int tracing_is_enabled(void)
367{
10246fa3
SRRH
368 /*
369 * For quick access (irqsoff uses this in fast path), just
370 * return the mirror variable of the state of the ring buffer.
371 * It's a little racy, but we don't really care.
372 */
373 smp_rmb();
374 return !global_trace.buffer_disabled;
9036990d
SR
375}
376
4fcdae83 377/*
3928a8a2
SR
378 * trace_buf_size is the size in bytes that is allocated
379 * for a buffer. Note, the number of bytes is always rounded
380 * to page size.
3f5a54e3
SR
381 *
382 * This number is purposely set to a low number of 16384.
383 * If the dump on oops happens, it will be much appreciated
384 * to not have to wait for all that output. Anyway this can be
385 * boot time and run time configurable.
4fcdae83 386 */
3928a8a2 387#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 388
3928a8a2 389static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 390
4fcdae83 391/* trace_types holds a link list of available tracers. */
bc0c38d1 392static struct tracer *trace_types __read_mostly;
4fcdae83 393
4fcdae83
SR
394/*
395 * trace_types_lock is used to protect the trace_types list.
4fcdae83 396 */
a8227415 397DEFINE_MUTEX(trace_types_lock);
4fcdae83 398
7e53bd42
LJ
399/*
400 * serialize the access of the ring buffer
401 *
402 * ring buffer serializes readers, but it is low level protection.
403 * The validity of the events (which returns by ring_buffer_peek() ..etc)
404 * are not protected by ring buffer.
405 *
406 * The content of events may become garbage if we allow other process consumes
407 * these events concurrently:
408 * A) the page of the consumed events may become a normal page
409 * (not reader page) in ring buffer, and this page will be rewrited
410 * by events producer.
411 * B) The page of the consumed events may become a page for splice_read,
412 * and this page will be returned to system.
413 *
414 * These primitives allow multi process access to different cpu ring buffer
415 * concurrently.
416 *
417 * These primitives don't distinguish read-only and read-consume access.
418 * Multi read-only access are also serialized.
419 */
420
421#ifdef CONFIG_SMP
422static DECLARE_RWSEM(all_cpu_access_lock);
423static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
424
425static inline void trace_access_lock(int cpu)
426{
ae3b5093 427 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
428 /* gain it for accessing the whole ring buffer. */
429 down_write(&all_cpu_access_lock);
430 } else {
431 /* gain it for accessing a cpu ring buffer. */
432
ae3b5093 433 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
434 down_read(&all_cpu_access_lock);
435
436 /* Secondly block other access to this @cpu ring buffer. */
437 mutex_lock(&per_cpu(cpu_access_lock, cpu));
438 }
439}
440
441static inline void trace_access_unlock(int cpu)
442{
ae3b5093 443 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
444 up_write(&all_cpu_access_lock);
445 } else {
446 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
447 up_read(&all_cpu_access_lock);
448 }
449}
450
451static inline void trace_access_lock_init(void)
452{
453 int cpu;
454
455 for_each_possible_cpu(cpu)
456 mutex_init(&per_cpu(cpu_access_lock, cpu));
457}
458
459#else
460
461static DEFINE_MUTEX(access_lock);
462
463static inline void trace_access_lock(int cpu)
464{
465 (void)cpu;
466 mutex_lock(&access_lock);
467}
468
469static inline void trace_access_unlock(int cpu)
470{
471 (void)cpu;
472 mutex_unlock(&access_lock);
473}
474
475static inline void trace_access_lock_init(void)
476{
477}
478
479#endif
480
d78a4614
SRRH
481#ifdef CONFIG_STACKTRACE
482static void __ftrace_trace_stack(struct ring_buffer *buffer,
483 unsigned long flags,
484 int skip, int pc, struct pt_regs *regs);
73dddbb5
SRRH
485static inline void ftrace_trace_stack(struct ring_buffer *buffer,
486 unsigned long flags,
487 int skip, int pc, struct pt_regs *regs);
ca475e83 488
d78a4614
SRRH
489#else
490static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
491 unsigned long flags,
492 int skip, int pc, struct pt_regs *regs)
493{
494}
ca475e83 495static inline void ftrace_trace_stack(struct ring_buffer *buffer,
73dddbb5
SRRH
496 unsigned long flags,
497 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
498{
499}
500
d78a4614
SRRH
501#endif
502
5280bcef 503static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
504{
505 if (tr->trace_buffer.buffer)
506 ring_buffer_record_on(tr->trace_buffer.buffer);
507 /*
508 * This flag is looked at when buffers haven't been allocated
509 * yet, or by some tracers (like irqsoff), that just want to
510 * know if the ring buffer has been disabled, but it can handle
511 * races of where it gets disabled but we still do a record.
512 * As the check is in the fast path of the tracers, it is more
513 * important to be fast than accurate.
514 */
515 tr->buffer_disabled = 0;
516 /* Make the flag seen by readers */
517 smp_wmb();
518}
519
499e5470
SR
520/**
521 * tracing_on - enable tracing buffers
522 *
523 * This function enables tracing buffers that may have been
524 * disabled with tracing_off.
525 */
526void tracing_on(void)
527{
10246fa3 528 tracer_tracing_on(&global_trace);
499e5470
SR
529}
530EXPORT_SYMBOL_GPL(tracing_on);
531
09ae7234
SRRH
532/**
533 * __trace_puts - write a constant string into the trace buffer.
534 * @ip: The address of the caller
535 * @str: The constant string to write
536 * @size: The size of the string.
537 */
538int __trace_puts(unsigned long ip, const char *str, int size)
539{
540 struct ring_buffer_event *event;
541 struct ring_buffer *buffer;
542 struct print_entry *entry;
543 unsigned long irq_flags;
544 int alloc;
8abfb872
J
545 int pc;
546
983f938a 547 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
548 return 0;
549
8abfb872 550 pc = preempt_count();
09ae7234 551
3132e107
SRRH
552 if (unlikely(tracing_selftest_running || tracing_disabled))
553 return 0;
554
09ae7234
SRRH
555 alloc = sizeof(*entry) + size + 2; /* possible \n added */
556
557 local_save_flags(irq_flags);
558 buffer = global_trace.trace_buffer.buffer;
559 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
8abfb872 560 irq_flags, pc);
09ae7234
SRRH
561 if (!event)
562 return 0;
563
564 entry = ring_buffer_event_data(event);
565 entry->ip = ip;
566
567 memcpy(&entry->buf, str, size);
568
569 /* Add a newline if necessary */
570 if (entry->buf[size - 1] != '\n') {
571 entry->buf[size] = '\n';
572 entry->buf[size + 1] = '\0';
573 } else
574 entry->buf[size] = '\0';
575
576 __buffer_unlock_commit(buffer, event);
73dddbb5 577 ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
578
579 return size;
580}
581EXPORT_SYMBOL_GPL(__trace_puts);
582
583/**
584 * __trace_bputs - write the pointer to a constant string into trace buffer
585 * @ip: The address of the caller
586 * @str: The constant string to write to the buffer to
587 */
588int __trace_bputs(unsigned long ip, const char *str)
589{
590 struct ring_buffer_event *event;
591 struct ring_buffer *buffer;
592 struct bputs_entry *entry;
593 unsigned long irq_flags;
594 int size = sizeof(struct bputs_entry);
8abfb872
J
595 int pc;
596
983f938a 597 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
598 return 0;
599
8abfb872 600 pc = preempt_count();
09ae7234 601
3132e107
SRRH
602 if (unlikely(tracing_selftest_running || tracing_disabled))
603 return 0;
604
09ae7234
SRRH
605 local_save_flags(irq_flags);
606 buffer = global_trace.trace_buffer.buffer;
607 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
8abfb872 608 irq_flags, pc);
09ae7234
SRRH
609 if (!event)
610 return 0;
611
612 entry = ring_buffer_event_data(event);
613 entry->ip = ip;
614 entry->str = str;
615
616 __buffer_unlock_commit(buffer, event);
73dddbb5 617 ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
618
619 return 1;
620}
621EXPORT_SYMBOL_GPL(__trace_bputs);
622
ad909e21
SRRH
623#ifdef CONFIG_TRACER_SNAPSHOT
624/**
625 * trace_snapshot - take a snapshot of the current buffer.
626 *
627 * This causes a swap between the snapshot buffer and the current live
628 * tracing buffer. You can use this to take snapshots of the live
629 * trace when some condition is triggered, but continue to trace.
630 *
631 * Note, make sure to allocate the snapshot with either
632 * a tracing_snapshot_alloc(), or by doing it manually
633 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
634 *
635 * If the snapshot buffer is not allocated, it will stop tracing.
636 * Basically making a permanent snapshot.
637 */
638void tracing_snapshot(void)
639{
640 struct trace_array *tr = &global_trace;
641 struct tracer *tracer = tr->current_trace;
642 unsigned long flags;
643
1b22e382
SRRH
644 if (in_nmi()) {
645 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
646 internal_trace_puts("*** snapshot is being ignored ***\n");
647 return;
648 }
649
ad909e21 650 if (!tr->allocated_snapshot) {
ca268da6
SRRH
651 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
652 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
653 tracing_off();
654 return;
655 }
656
657 /* Note, snapshot can not be used when the tracer uses it */
658 if (tracer->use_max_tr) {
ca268da6
SRRH
659 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
660 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
661 return;
662 }
663
664 local_irq_save(flags);
665 update_max_tr(tr, current, smp_processor_id());
666 local_irq_restore(flags);
667}
1b22e382 668EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
669
670static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
671 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
672static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
673
674static int alloc_snapshot(struct trace_array *tr)
675{
676 int ret;
677
678 if (!tr->allocated_snapshot) {
679
680 /* allocate spare buffer */
681 ret = resize_buffer_duplicate_size(&tr->max_buffer,
682 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
683 if (ret < 0)
684 return ret;
685
686 tr->allocated_snapshot = true;
687 }
688
689 return 0;
690}
691
ad1438a0 692static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
693{
694 /*
695 * We don't free the ring buffer. instead, resize it because
696 * The max_tr ring buffer has some state (e.g. ring->clock) and
697 * we want preserve it.
698 */
699 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
700 set_buffer_entries(&tr->max_buffer, 1);
701 tracing_reset_online_cpus(&tr->max_buffer);
702 tr->allocated_snapshot = false;
703}
ad909e21 704
93e31ffb
TZ
705/**
706 * tracing_alloc_snapshot - allocate snapshot buffer.
707 *
708 * This only allocates the snapshot buffer if it isn't already
709 * allocated - it doesn't also take a snapshot.
710 *
711 * This is meant to be used in cases where the snapshot buffer needs
712 * to be set up for events that can't sleep but need to be able to
713 * trigger a snapshot.
714 */
715int tracing_alloc_snapshot(void)
716{
717 struct trace_array *tr = &global_trace;
718 int ret;
719
720 ret = alloc_snapshot(tr);
721 WARN_ON(ret < 0);
722
723 return ret;
724}
725EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
726
ad909e21
SRRH
727/**
728 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
729 *
730 * This is similar to trace_snapshot(), but it will allocate the
731 * snapshot buffer if it isn't already allocated. Use this only
732 * where it is safe to sleep, as the allocation may sleep.
733 *
734 * This causes a swap between the snapshot buffer and the current live
735 * tracing buffer. You can use this to take snapshots of the live
736 * trace when some condition is triggered, but continue to trace.
737 */
738void tracing_snapshot_alloc(void)
739{
ad909e21
SRRH
740 int ret;
741
93e31ffb
TZ
742 ret = tracing_alloc_snapshot();
743 if (ret < 0)
3209cff4 744 return;
ad909e21
SRRH
745
746 tracing_snapshot();
747}
1b22e382 748EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
749#else
750void tracing_snapshot(void)
751{
752 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
753}
1b22e382 754EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
755int tracing_alloc_snapshot(void)
756{
757 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
758 return -ENODEV;
759}
760EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
761void tracing_snapshot_alloc(void)
762{
763 /* Give warning */
764 tracing_snapshot();
765}
1b22e382 766EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
767#endif /* CONFIG_TRACER_SNAPSHOT */
768
5280bcef 769static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
770{
771 if (tr->trace_buffer.buffer)
772 ring_buffer_record_off(tr->trace_buffer.buffer);
773 /*
774 * This flag is looked at when buffers haven't been allocated
775 * yet, or by some tracers (like irqsoff), that just want to
776 * know if the ring buffer has been disabled, but it can handle
777 * races of where it gets disabled but we still do a record.
778 * As the check is in the fast path of the tracers, it is more
779 * important to be fast than accurate.
780 */
781 tr->buffer_disabled = 1;
782 /* Make the flag seen by readers */
783 smp_wmb();
784}
785
499e5470
SR
786/**
787 * tracing_off - turn off tracing buffers
788 *
789 * This function stops the tracing buffers from recording data.
790 * It does not disable any overhead the tracers themselves may
791 * be causing. This function simply causes all recording to
792 * the ring buffers to fail.
793 */
794void tracing_off(void)
795{
10246fa3 796 tracer_tracing_off(&global_trace);
499e5470
SR
797}
798EXPORT_SYMBOL_GPL(tracing_off);
799
de7edd31
SRRH
800void disable_trace_on_warning(void)
801{
802 if (__disable_trace_on_warning)
803 tracing_off();
804}
805
10246fa3
SRRH
806/**
807 * tracer_tracing_is_on - show real state of ring buffer enabled
808 * @tr : the trace array to know if ring buffer is enabled
809 *
810 * Shows real state of the ring buffer if it is enabled or not.
811 */
5280bcef 812static int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
813{
814 if (tr->trace_buffer.buffer)
815 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
816 return !tr->buffer_disabled;
817}
818
499e5470
SR
819/**
820 * tracing_is_on - show state of ring buffers enabled
821 */
822int tracing_is_on(void)
823{
10246fa3 824 return tracer_tracing_is_on(&global_trace);
499e5470
SR
825}
826EXPORT_SYMBOL_GPL(tracing_is_on);
827
3928a8a2 828static int __init set_buf_size(char *str)
bc0c38d1 829{
3928a8a2 830 unsigned long buf_size;
c6caeeb1 831
bc0c38d1
SR
832 if (!str)
833 return 0;
9d612bef 834 buf_size = memparse(str, &str);
c6caeeb1 835 /* nr_entries can not be zero */
9d612bef 836 if (buf_size == 0)
c6caeeb1 837 return 0;
3928a8a2 838 trace_buf_size = buf_size;
bc0c38d1
SR
839 return 1;
840}
3928a8a2 841__setup("trace_buf_size=", set_buf_size);
bc0c38d1 842
0e950173
TB
843static int __init set_tracing_thresh(char *str)
844{
87abb3b1 845 unsigned long threshold;
0e950173
TB
846 int ret;
847
848 if (!str)
849 return 0;
bcd83ea6 850 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
851 if (ret < 0)
852 return 0;
87abb3b1 853 tracing_thresh = threshold * 1000;
0e950173
TB
854 return 1;
855}
856__setup("tracing_thresh=", set_tracing_thresh);
857
57f50be1
SR
858unsigned long nsecs_to_usecs(unsigned long nsecs)
859{
860 return nsecs / 1000;
861}
862
a3418a36
SRRH
863/*
864 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
865 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
866 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
867 * of strings in the order that the enums were defined.
868 */
869#undef C
870#define C(a, b) b
871
4fcdae83 872/* These must match the bit postions in trace_iterator_flags */
bc0c38d1 873static const char *trace_options[] = {
a3418a36 874 TRACE_FLAGS
bc0c38d1
SR
875 NULL
876};
877
5079f326
Z
878static struct {
879 u64 (*func)(void);
880 const char *name;
8be0709f 881 int in_ns; /* is this clock in nanoseconds? */
5079f326 882} trace_clocks[] = {
1b3e5c09
TG
883 { trace_clock_local, "local", 1 },
884 { trace_clock_global, "global", 1 },
885 { trace_clock_counter, "counter", 0 },
e7fda6c4 886 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
887 { trace_clock, "perf", 1 },
888 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 889 { ktime_get_raw_fast_ns, "mono_raw", 1 },
8cbd9cc6 890 ARCH_TRACE_CLOCKS
5079f326
Z
891};
892
b63f39ea 893/*
894 * trace_parser_get_init - gets the buffer for trace parser
895 */
896int trace_parser_get_init(struct trace_parser *parser, int size)
897{
898 memset(parser, 0, sizeof(*parser));
899
900 parser->buffer = kmalloc(size, GFP_KERNEL);
901 if (!parser->buffer)
902 return 1;
903
904 parser->size = size;
905 return 0;
906}
907
908/*
909 * trace_parser_put - frees the buffer for trace parser
910 */
911void trace_parser_put(struct trace_parser *parser)
912{
913 kfree(parser->buffer);
914}
915
916/*
917 * trace_get_user - reads the user input string separated by space
918 * (matched by isspace(ch))
919 *
920 * For each string found the 'struct trace_parser' is updated,
921 * and the function returns.
922 *
923 * Returns number of bytes read.
924 *
925 * See kernel/trace/trace.h for 'struct trace_parser' details.
926 */
927int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
928 size_t cnt, loff_t *ppos)
929{
930 char ch;
931 size_t read = 0;
932 ssize_t ret;
933
934 if (!*ppos)
935 trace_parser_clear(parser);
936
937 ret = get_user(ch, ubuf++);
938 if (ret)
939 goto out;
940
941 read++;
942 cnt--;
943
944 /*
945 * The parser is not finished with the last write,
946 * continue reading the user input without skipping spaces.
947 */
948 if (!parser->cont) {
949 /* skip white space */
950 while (cnt && isspace(ch)) {
951 ret = get_user(ch, ubuf++);
952 if (ret)
953 goto out;
954 read++;
955 cnt--;
956 }
957
958 /* only spaces were written */
959 if (isspace(ch)) {
960 *ppos += read;
961 ret = read;
962 goto out;
963 }
964
965 parser->idx = 0;
966 }
967
968 /* read the non-space input */
969 while (cnt && !isspace(ch)) {
3c235a33 970 if (parser->idx < parser->size - 1)
b63f39ea 971 parser->buffer[parser->idx++] = ch;
972 else {
973 ret = -EINVAL;
974 goto out;
975 }
976 ret = get_user(ch, ubuf++);
977 if (ret)
978 goto out;
979 read++;
980 cnt--;
981 }
982
983 /* We either got finished input or we have to wait for another call. */
984 if (isspace(ch)) {
985 parser->buffer[parser->idx] = 0;
986 parser->cont = false;
057db848 987 } else if (parser->idx < parser->size - 1) {
b63f39ea 988 parser->cont = true;
989 parser->buffer[parser->idx++] = ch;
057db848
SR
990 } else {
991 ret = -EINVAL;
992 goto out;
b63f39ea 993 }
994
995 *ppos += read;
996 ret = read;
997
998out:
999 return ret;
1000}
1001
3a161d99 1002/* TODO add a seq_buf_to_buffer() */
b8b94265 1003static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1004{
1005 int len;
3c56819b 1006
5ac48378 1007 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1008 return -EBUSY;
1009
5ac48378 1010 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1011 if (cnt > len)
1012 cnt = len;
3a161d99 1013 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1014
3a161d99 1015 s->seq.readpos += cnt;
3c56819b
EGM
1016 return cnt;
1017}
1018
0e950173
TB
1019unsigned long __read_mostly tracing_thresh;
1020
5d4a9dba 1021#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1022/*
1023 * Copy the new maximum trace into the separate maximum-trace
1024 * structure. (this way the maximum trace is permanently saved,
1025 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1026 */
1027static void
1028__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1029{
12883efb
SRRH
1030 struct trace_buffer *trace_buf = &tr->trace_buffer;
1031 struct trace_buffer *max_buf = &tr->max_buffer;
1032 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1033 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1034
12883efb
SRRH
1035 max_buf->cpu = cpu;
1036 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1037
6d9b3fa5 1038 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1039 max_data->critical_start = data->critical_start;
1040 max_data->critical_end = data->critical_end;
5d4a9dba 1041
1acaa1b2 1042 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1043 max_data->pid = tsk->pid;
f17a5194
SRRH
1044 /*
1045 * If tsk == current, then use current_uid(), as that does not use
1046 * RCU. The irq tracer can be called out of RCU scope.
1047 */
1048 if (tsk == current)
1049 max_data->uid = current_uid();
1050 else
1051 max_data->uid = task_uid(tsk);
1052
8248ac05
SR
1053 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1054 max_data->policy = tsk->policy;
1055 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1056
1057 /* record this tasks comm */
1058 tracing_record_cmdline(tsk);
1059}
1060
4fcdae83
SR
1061/**
1062 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1063 * @tr: tracer
1064 * @tsk: the task with the latency
1065 * @cpu: The cpu that initiated the trace.
1066 *
1067 * Flip the buffers between the @tr and the max_tr and record information
1068 * about which task was the cause of this latency.
1069 */
e309b41d 1070void
bc0c38d1
SR
1071update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1072{
2721e72d 1073 struct ring_buffer *buf;
bc0c38d1 1074
2b6080f2 1075 if (tr->stop_count)
b8de7bd1
SR
1076 return;
1077
4c11d7ae 1078 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1079
45ad21ca 1080 if (!tr->allocated_snapshot) {
debdd57f 1081 /* Only the nop tracer should hit this when disabling */
2b6080f2 1082 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1083 return;
debdd57f 1084 }
34600f0e 1085
0b9b12c1 1086 arch_spin_lock(&tr->max_lock);
3928a8a2 1087
12883efb
SRRH
1088 buf = tr->trace_buffer.buffer;
1089 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1090 tr->max_buffer.buffer = buf;
3928a8a2 1091
bc0c38d1 1092 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1093 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1094}
1095
1096/**
1097 * update_max_tr_single - only copy one trace over, and reset the rest
1098 * @tr - tracer
1099 * @tsk - task with the latency
1100 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1101 *
1102 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1103 */
e309b41d 1104void
bc0c38d1
SR
1105update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1106{
3928a8a2 1107 int ret;
bc0c38d1 1108
2b6080f2 1109 if (tr->stop_count)
b8de7bd1
SR
1110 return;
1111
4c11d7ae 1112 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1113 if (!tr->allocated_snapshot) {
2930e04d 1114 /* Only the nop tracer should hit this when disabling */
9e8529af 1115 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1116 return;
2930e04d 1117 }
ef710e10 1118
0b9b12c1 1119 arch_spin_lock(&tr->max_lock);
bc0c38d1 1120
12883efb 1121 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1122
e8165dbb
SR
1123 if (ret == -EBUSY) {
1124 /*
1125 * We failed to swap the buffer due to a commit taking
1126 * place on this CPU. We fail to record, but we reset
1127 * the max trace buffer (no one writes directly to it)
1128 * and flag that it failed.
1129 */
12883efb 1130 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1131 "Failed to swap buffers due to commit in progress\n");
1132 }
1133
e8165dbb 1134 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1135
1136 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1137 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1138}
5d4a9dba 1139#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1140
e30f53aa 1141static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1142{
15693458
SRRH
1143 /* Iterators are static, they should be filled or empty */
1144 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1145 return 0;
0d5c6e1c 1146
e30f53aa
RV
1147 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1148 full);
0d5c6e1c
SR
1149}
1150
f4e781c0
SRRH
1151#ifdef CONFIG_FTRACE_STARTUP_TEST
1152static int run_tracer_selftest(struct tracer *type)
1153{
1154 struct trace_array *tr = &global_trace;
1155 struct tracer *saved_tracer = tr->current_trace;
1156 int ret;
0d5c6e1c 1157
f4e781c0
SRRH
1158 if (!type->selftest || tracing_selftest_disabled)
1159 return 0;
0d5c6e1c
SR
1160
1161 /*
f4e781c0
SRRH
1162 * Run a selftest on this tracer.
1163 * Here we reset the trace buffer, and set the current
1164 * tracer to be this tracer. The tracer can then run some
1165 * internal tracing to verify that everything is in order.
1166 * If we fail, we do not register this tracer.
0d5c6e1c 1167 */
f4e781c0 1168 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1169
f4e781c0
SRRH
1170 tr->current_trace = type;
1171
1172#ifdef CONFIG_TRACER_MAX_TRACE
1173 if (type->use_max_tr) {
1174 /* If we expanded the buffers, make sure the max is expanded too */
1175 if (ring_buffer_expanded)
1176 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1177 RING_BUFFER_ALL_CPUS);
1178 tr->allocated_snapshot = true;
1179 }
1180#endif
1181
1182 /* the test is responsible for initializing and enabling */
1183 pr_info("Testing tracer %s: ", type->name);
1184 ret = type->selftest(type, tr);
1185 /* the test is responsible for resetting too */
1186 tr->current_trace = saved_tracer;
1187 if (ret) {
1188 printk(KERN_CONT "FAILED!\n");
1189 /* Add the warning after printing 'FAILED' */
1190 WARN_ON(1);
1191 return -1;
1192 }
1193 /* Only reset on passing, to avoid touching corrupted buffers */
1194 tracing_reset_online_cpus(&tr->trace_buffer);
1195
1196#ifdef CONFIG_TRACER_MAX_TRACE
1197 if (type->use_max_tr) {
1198 tr->allocated_snapshot = false;
0d5c6e1c 1199
f4e781c0
SRRH
1200 /* Shrink the max buffer again */
1201 if (ring_buffer_expanded)
1202 ring_buffer_resize(tr->max_buffer.buffer, 1,
1203 RING_BUFFER_ALL_CPUS);
1204 }
1205#endif
1206
1207 printk(KERN_CONT "PASSED\n");
1208 return 0;
1209}
1210#else
1211static inline int run_tracer_selftest(struct tracer *type)
1212{
1213 return 0;
0d5c6e1c 1214}
f4e781c0 1215#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1216
41d9c0be
SRRH
1217static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1218
4fcdae83
SR
1219/**
1220 * register_tracer - register a tracer with the ftrace system.
1221 * @type - the plugin for the tracer
1222 *
1223 * Register a new plugin tracer.
1224 */
bc0c38d1
SR
1225int register_tracer(struct tracer *type)
1226{
1227 struct tracer *t;
bc0c38d1
SR
1228 int ret = 0;
1229
1230 if (!type->name) {
1231 pr_info("Tracer must have a name\n");
1232 return -1;
1233 }
1234
24a461d5 1235 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1236 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1237 return -1;
1238 }
1239
bc0c38d1 1240 mutex_lock(&trace_types_lock);
86fa2f60 1241
8e1b82e0
FW
1242 tracing_selftest_running = true;
1243
bc0c38d1
SR
1244 for (t = trace_types; t; t = t->next) {
1245 if (strcmp(type->name, t->name) == 0) {
1246 /* already found */
ee6c2c1b 1247 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1248 type->name);
1249 ret = -1;
1250 goto out;
1251 }
1252 }
1253
adf9f195
FW
1254 if (!type->set_flag)
1255 type->set_flag = &dummy_set_flag;
1256 if (!type->flags)
1257 type->flags = &dummy_tracer_flags;
1258 else
1259 if (!type->flags->opts)
1260 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1261
f4e781c0
SRRH
1262 ret = run_tracer_selftest(type);
1263 if (ret < 0)
1264 goto out;
60a11774 1265
bc0c38d1
SR
1266 type->next = trace_types;
1267 trace_types = type;
41d9c0be 1268 add_tracer_options(&global_trace, type);
60a11774 1269
bc0c38d1 1270 out:
8e1b82e0 1271 tracing_selftest_running = false;
bc0c38d1
SR
1272 mutex_unlock(&trace_types_lock);
1273
dac74940
SR
1274 if (ret || !default_bootup_tracer)
1275 goto out_unlock;
1276
ee6c2c1b 1277 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1278 goto out_unlock;
1279
1280 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1281 /* Do we want this tracer to start on bootup? */
607e2ea1 1282 tracing_set_tracer(&global_trace, type->name);
dac74940
SR
1283 default_bootup_tracer = NULL;
1284 /* disable other selftests, since this will break it. */
55034cd6 1285 tracing_selftest_disabled = true;
b2821ae6 1286#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1287 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1288 type->name);
b2821ae6 1289#endif
b2821ae6 1290
dac74940 1291 out_unlock:
bc0c38d1
SR
1292 return ret;
1293}
1294
12883efb 1295void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1296{
12883efb 1297 struct ring_buffer *buffer = buf->buffer;
f633903a 1298
a5416411
HT
1299 if (!buffer)
1300 return;
1301
f633903a
SR
1302 ring_buffer_record_disable(buffer);
1303
1304 /* Make sure all commits have finished */
1305 synchronize_sched();
68179686 1306 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1307
1308 ring_buffer_record_enable(buffer);
1309}
1310
12883efb 1311void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1312{
12883efb 1313 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1314 int cpu;
1315
a5416411
HT
1316 if (!buffer)
1317 return;
1318
621968cd
SR
1319 ring_buffer_record_disable(buffer);
1320
1321 /* Make sure all commits have finished */
1322 synchronize_sched();
1323
9457158b 1324 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1325
1326 for_each_online_cpu(cpu)
68179686 1327 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1328
1329 ring_buffer_record_enable(buffer);
213cc060
PE
1330}
1331
09d8091c 1332/* Must have trace_types_lock held */
873c642f 1333void tracing_reset_all_online_cpus(void)
9456f0fa 1334{
873c642f
SRRH
1335 struct trace_array *tr;
1336
873c642f 1337 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1338 tracing_reset_online_cpus(&tr->trace_buffer);
1339#ifdef CONFIG_TRACER_MAX_TRACE
1340 tracing_reset_online_cpus(&tr->max_buffer);
1341#endif
873c642f 1342 }
9456f0fa
SR
1343}
1344
939c7a4f 1345#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1346#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1347static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1348struct saved_cmdlines_buffer {
1349 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1350 unsigned *map_cmdline_to_pid;
1351 unsigned cmdline_num;
1352 int cmdline_idx;
1353 char *saved_cmdlines;
1354};
1355static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1356
25b0b44a 1357/* temporary disable recording */
4fd27358 1358static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1359
939c7a4f
YY
1360static inline char *get_saved_cmdlines(int idx)
1361{
1362 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1363}
1364
1365static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1366{
939c7a4f
YY
1367 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1368}
1369
1370static int allocate_cmdlines_buffer(unsigned int val,
1371 struct saved_cmdlines_buffer *s)
1372{
1373 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1374 GFP_KERNEL);
1375 if (!s->map_cmdline_to_pid)
1376 return -ENOMEM;
1377
1378 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1379 if (!s->saved_cmdlines) {
1380 kfree(s->map_cmdline_to_pid);
1381 return -ENOMEM;
1382 }
1383
1384 s->cmdline_idx = 0;
1385 s->cmdline_num = val;
1386 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1387 sizeof(s->map_pid_to_cmdline));
1388 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1389 val * sizeof(*s->map_cmdline_to_pid));
1390
1391 return 0;
1392}
1393
1394static int trace_create_savedcmd(void)
1395{
1396 int ret;
1397
a6af8fbf 1398 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1399 if (!savedcmd)
1400 return -ENOMEM;
1401
1402 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1403 if (ret < 0) {
1404 kfree(savedcmd);
1405 savedcmd = NULL;
1406 return -ENOMEM;
1407 }
1408
1409 return 0;
bc0c38d1
SR
1410}
1411
b5130b1e
CE
1412int is_tracing_stopped(void)
1413{
2b6080f2 1414 return global_trace.stop_count;
b5130b1e
CE
1415}
1416
0f048701
SR
1417/**
1418 * tracing_start - quick start of the tracer
1419 *
1420 * If tracing is enabled but was stopped by tracing_stop,
1421 * this will start the tracer back up.
1422 */
1423void tracing_start(void)
1424{
1425 struct ring_buffer *buffer;
1426 unsigned long flags;
1427
1428 if (tracing_disabled)
1429 return;
1430
2b6080f2
SR
1431 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1432 if (--global_trace.stop_count) {
1433 if (global_trace.stop_count < 0) {
b06a8301
SR
1434 /* Someone screwed up their debugging */
1435 WARN_ON_ONCE(1);
2b6080f2 1436 global_trace.stop_count = 0;
b06a8301 1437 }
0f048701
SR
1438 goto out;
1439 }
1440
a2f80714 1441 /* Prevent the buffers from switching */
0b9b12c1 1442 arch_spin_lock(&global_trace.max_lock);
0f048701 1443
12883efb 1444 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1445 if (buffer)
1446 ring_buffer_record_enable(buffer);
1447
12883efb
SRRH
1448#ifdef CONFIG_TRACER_MAX_TRACE
1449 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1450 if (buffer)
1451 ring_buffer_record_enable(buffer);
12883efb 1452#endif
0f048701 1453
0b9b12c1 1454 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1455
0f048701 1456 out:
2b6080f2
SR
1457 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1458}
1459
1460static void tracing_start_tr(struct trace_array *tr)
1461{
1462 struct ring_buffer *buffer;
1463 unsigned long flags;
1464
1465 if (tracing_disabled)
1466 return;
1467
1468 /* If global, we need to also start the max tracer */
1469 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1470 return tracing_start();
1471
1472 raw_spin_lock_irqsave(&tr->start_lock, flags);
1473
1474 if (--tr->stop_count) {
1475 if (tr->stop_count < 0) {
1476 /* Someone screwed up their debugging */
1477 WARN_ON_ONCE(1);
1478 tr->stop_count = 0;
1479 }
1480 goto out;
1481 }
1482
12883efb 1483 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1484 if (buffer)
1485 ring_buffer_record_enable(buffer);
1486
1487 out:
1488 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1489}
1490
1491/**
1492 * tracing_stop - quick stop of the tracer
1493 *
1494 * Light weight way to stop tracing. Use in conjunction with
1495 * tracing_start.
1496 */
1497void tracing_stop(void)
1498{
1499 struct ring_buffer *buffer;
1500 unsigned long flags;
1501
2b6080f2
SR
1502 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1503 if (global_trace.stop_count++)
0f048701
SR
1504 goto out;
1505
a2f80714 1506 /* Prevent the buffers from switching */
0b9b12c1 1507 arch_spin_lock(&global_trace.max_lock);
a2f80714 1508
12883efb 1509 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1510 if (buffer)
1511 ring_buffer_record_disable(buffer);
1512
12883efb
SRRH
1513#ifdef CONFIG_TRACER_MAX_TRACE
1514 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1515 if (buffer)
1516 ring_buffer_record_disable(buffer);
12883efb 1517#endif
0f048701 1518
0b9b12c1 1519 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1520
0f048701 1521 out:
2b6080f2
SR
1522 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1523}
1524
1525static void tracing_stop_tr(struct trace_array *tr)
1526{
1527 struct ring_buffer *buffer;
1528 unsigned long flags;
1529
1530 /* If global, we need to also stop the max tracer */
1531 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1532 return tracing_stop();
1533
1534 raw_spin_lock_irqsave(&tr->start_lock, flags);
1535 if (tr->stop_count++)
1536 goto out;
1537
12883efb 1538 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1539 if (buffer)
1540 ring_buffer_record_disable(buffer);
1541
1542 out:
1543 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1544}
1545
e309b41d 1546void trace_stop_cmdline_recording(void);
bc0c38d1 1547
379cfdac 1548static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1549{
a635cf04 1550 unsigned pid, idx;
bc0c38d1
SR
1551
1552 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1553 return 0;
bc0c38d1
SR
1554
1555 /*
1556 * It's not the end of the world if we don't get
1557 * the lock, but we also don't want to spin
1558 * nor do we want to disable interrupts,
1559 * so if we miss here, then better luck next time.
1560 */
0199c4e6 1561 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1562 return 0;
bc0c38d1 1563
939c7a4f 1564 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1565 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1566 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1567
a635cf04
CE
1568 /*
1569 * Check whether the cmdline buffer at idx has a pid
1570 * mapped. We are going to overwrite that entry so we
1571 * need to clear the map_pid_to_cmdline. Otherwise we
1572 * would read the new comm for the old pid.
1573 */
939c7a4f 1574 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1575 if (pid != NO_CMDLINE_MAP)
939c7a4f 1576 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1577
939c7a4f
YY
1578 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1579 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1580
939c7a4f 1581 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1582 }
1583
939c7a4f 1584 set_cmdline(idx, tsk->comm);
bc0c38d1 1585
0199c4e6 1586 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1587
1588 return 1;
bc0c38d1
SR
1589}
1590
4c27e756 1591static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1592{
bc0c38d1
SR
1593 unsigned map;
1594
4ca53085
SR
1595 if (!pid) {
1596 strcpy(comm, "<idle>");
1597 return;
1598 }
bc0c38d1 1599
74bf4076
SR
1600 if (WARN_ON_ONCE(pid < 0)) {
1601 strcpy(comm, "<XXX>");
1602 return;
1603 }
1604
4ca53085
SR
1605 if (pid > PID_MAX_DEFAULT) {
1606 strcpy(comm, "<...>");
1607 return;
1608 }
bc0c38d1 1609
939c7a4f 1610 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1611 if (map != NO_CMDLINE_MAP)
939c7a4f 1612 strcpy(comm, get_saved_cmdlines(map));
50d88758
TG
1613 else
1614 strcpy(comm, "<...>");
4c27e756
SRRH
1615}
1616
1617void trace_find_cmdline(int pid, char comm[])
1618{
1619 preempt_disable();
1620 arch_spin_lock(&trace_cmdline_lock);
1621
1622 __trace_find_cmdline(pid, comm);
bc0c38d1 1623
0199c4e6 1624 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1625 preempt_enable();
bc0c38d1
SR
1626}
1627
e309b41d 1628void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1629{
0fb9656d 1630 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1631 return;
1632
7ffbd48d
SR
1633 if (!__this_cpu_read(trace_cmdline_save))
1634 return;
1635
379cfdac
SRRH
1636 if (trace_save_cmdline(tsk))
1637 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
1638}
1639
45dcd8b8 1640void
38697053
SR
1641tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1642 int pc)
bc0c38d1
SR
1643{
1644 struct task_struct *tsk = current;
bc0c38d1 1645
777e208d
SR
1646 entry->preempt_count = pc & 0xff;
1647 entry->pid = (tsk) ? tsk->pid : 0;
1648 entry->flags =
9244489a 1649#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 1650 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
1651#else
1652 TRACE_FLAG_IRQS_NOSUPPORT |
1653#endif
bc0c38d1
SR
1654 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1655 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
1656 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1657 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 1658}
f413cdb8 1659EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 1660
e77405ad
SR
1661struct ring_buffer_event *
1662trace_buffer_lock_reserve(struct ring_buffer *buffer,
1663 int type,
1664 unsigned long len,
1665 unsigned long flags, int pc)
51a763dd
ACM
1666{
1667 struct ring_buffer_event *event;
1668
e77405ad 1669 event = ring_buffer_lock_reserve(buffer, len);
51a763dd
ACM
1670 if (event != NULL) {
1671 struct trace_entry *ent = ring_buffer_event_data(event);
1672
1673 tracing_generic_entry_update(ent, flags, pc);
1674 ent->type = type;
1675 }
1676
1677 return event;
1678}
51a763dd 1679
7ffbd48d
SR
1680void
1681__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1682{
1683 __this_cpu_write(trace_cmdline_save, true);
1684 ring_buffer_unlock_commit(buffer, event);
1685}
1686
b7f0c959
SRRH
1687void trace_buffer_unlock_commit(struct trace_array *tr,
1688 struct ring_buffer *buffer,
1689 struct ring_buffer_event *event,
1690 unsigned long flags, int pc)
51a763dd 1691{
7ffbd48d 1692 __buffer_unlock_commit(buffer, event);
51a763dd 1693
73dddbb5 1694 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
e77405ad 1695 ftrace_trace_userstack(buffer, flags, pc);
07edf712 1696}
0d5c6e1c 1697EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
51a763dd 1698
2c4a33ab
SRRH
1699static struct ring_buffer *temp_buffer;
1700
ccb469a1
SR
1701struct ring_buffer_event *
1702trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 1703 struct trace_event_file *trace_file,
ccb469a1
SR
1704 int type, unsigned long len,
1705 unsigned long flags, int pc)
1706{
2c4a33ab
SRRH
1707 struct ring_buffer_event *entry;
1708
7f1d2f82 1709 *current_rb = trace_file->tr->trace_buffer.buffer;
2c4a33ab 1710 entry = trace_buffer_lock_reserve(*current_rb,
ccb469a1 1711 type, len, flags, pc);
2c4a33ab
SRRH
1712 /*
1713 * If tracing is off, but we have triggers enabled
1714 * we still need to look at the event data. Use the temp_buffer
1715 * to store the trace event for the tigger to use. It's recusive
1716 * safe and will not be recorded anywhere.
1717 */
5d6ad960 1718 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab
SRRH
1719 *current_rb = temp_buffer;
1720 entry = trace_buffer_lock_reserve(*current_rb,
1721 type, len, flags, pc);
1722 }
1723 return entry;
ccb469a1
SR
1724}
1725EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1726
ef5580d0 1727struct ring_buffer_event *
e77405ad
SR
1728trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1729 int type, unsigned long len,
ef5580d0
SR
1730 unsigned long flags, int pc)
1731{
12883efb 1732 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1733 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1734 type, len, flags, pc);
1735}
94487d6d 1736EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1737
b7f0c959
SRRH
1738void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1739 struct ring_buffer *buffer,
0d5c6e1c
SR
1740 struct ring_buffer_event *event,
1741 unsigned long flags, int pc,
1742 struct pt_regs *regs)
1fd8df2c 1743{
7ffbd48d 1744 __buffer_unlock_commit(buffer, event);
1fd8df2c 1745
73dddbb5 1746 ftrace_trace_stack(buffer, flags, 6, pc, regs);
1fd8df2c
MH
1747 ftrace_trace_userstack(buffer, flags, pc);
1748}
0d5c6e1c 1749EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1750
e77405ad
SR
1751void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1752 struct ring_buffer_event *event)
77d9f465 1753{
e77405ad 1754 ring_buffer_discard_commit(buffer, event);
ef5580d0 1755}
12acd473 1756EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
ef5580d0 1757
e309b41d 1758void
7be42151 1759trace_function(struct trace_array *tr,
38697053
SR
1760 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1761 int pc)
bc0c38d1 1762{
2425bcb9 1763 struct trace_event_call *call = &event_function;
12883efb 1764 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 1765 struct ring_buffer_event *event;
777e208d 1766 struct ftrace_entry *entry;
bc0c38d1 1767
d769041f 1768 /* If we are reading the ring buffer, don't trace */
dd17c8f7 1769 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
d769041f
SR
1770 return;
1771
e77405ad 1772 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
51a763dd 1773 flags, pc);
3928a8a2
SR
1774 if (!event)
1775 return;
1776 entry = ring_buffer_event_data(event);
777e208d
SR
1777 entry->ip = ip;
1778 entry->parent_ip = parent_ip;
e1112b4d 1779
f306cc82 1780 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1781 __buffer_unlock_commit(buffer, event);
bc0c38d1
SR
1782}
1783
c0a0d0d3 1784#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1785
1786#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1787struct ftrace_stack {
1788 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1789};
1790
1791static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1792static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1793
e77405ad 1794static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1795 unsigned long flags,
1fd8df2c 1796 int skip, int pc, struct pt_regs *regs)
86387f7e 1797{
2425bcb9 1798 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 1799 struct ring_buffer_event *event;
777e208d 1800 struct stack_entry *entry;
86387f7e 1801 struct stack_trace trace;
4a9bd3f1
SR
1802 int use_stack;
1803 int size = FTRACE_STACK_ENTRIES;
1804
1805 trace.nr_entries = 0;
1806 trace.skip = skip;
1807
1808 /*
1809 * Since events can happen in NMIs there's no safe way to
1810 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1811 * or NMI comes in, it will just have to use the default
1812 * FTRACE_STACK_SIZE.
1813 */
1814 preempt_disable_notrace();
1815
82146529 1816 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1817 /*
1818 * We don't need any atomic variables, just a barrier.
1819 * If an interrupt comes in, we don't care, because it would
1820 * have exited and put the counter back to what we want.
1821 * We just need a barrier to keep gcc from moving things
1822 * around.
1823 */
1824 barrier();
1825 if (use_stack == 1) {
bdffd893 1826 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1827 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1828
1829 if (regs)
1830 save_stack_trace_regs(regs, &trace);
1831 else
1832 save_stack_trace(&trace);
1833
1834 if (trace.nr_entries > size)
1835 size = trace.nr_entries;
1836 } else
1837 /* From now on, use_stack is a boolean */
1838 use_stack = 0;
1839
1840 size *= sizeof(unsigned long);
86387f7e 1841
e77405ad 1842 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1843 sizeof(*entry) + size, flags, pc);
3928a8a2 1844 if (!event)
4a9bd3f1
SR
1845 goto out;
1846 entry = ring_buffer_event_data(event);
86387f7e 1847
4a9bd3f1
SR
1848 memset(&entry->caller, 0, size);
1849
1850 if (use_stack)
1851 memcpy(&entry->caller, trace.entries,
1852 trace.nr_entries * sizeof(unsigned long));
1853 else {
1854 trace.max_entries = FTRACE_STACK_ENTRIES;
1855 trace.entries = entry->caller;
1856 if (regs)
1857 save_stack_trace_regs(regs, &trace);
1858 else
1859 save_stack_trace(&trace);
1860 }
1861
1862 entry->size = trace.nr_entries;
86387f7e 1863
f306cc82 1864 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1865 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1866
1867 out:
1868 /* Again, don't let gcc optimize things here */
1869 barrier();
82146529 1870 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1871 preempt_enable_notrace();
1872
f0a920d5
IM
1873}
1874
73dddbb5
SRRH
1875static inline void ftrace_trace_stack(struct ring_buffer *buffer,
1876 unsigned long flags,
1877 int skip, int pc, struct pt_regs *regs)
53614991 1878{
983f938a 1879 if (!(global_trace.trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
1880 return;
1881
73dddbb5 1882 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1883}
1884
c0a0d0d3
FW
1885void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1886 int pc)
38697053 1887{
12883efb 1888 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1889}
1890
03889384
SR
1891/**
1892 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1893 * @skip: Number of functions to skip (helper handlers)
03889384 1894 */
c142be8e 1895void trace_dump_stack(int skip)
03889384
SR
1896{
1897 unsigned long flags;
1898
1899 if (tracing_disabled || tracing_selftest_running)
e36c5458 1900 return;
03889384
SR
1901
1902 local_save_flags(flags);
1903
c142be8e
SRRH
1904 /*
1905 * Skip 3 more, seems to get us at the caller of
1906 * this function.
1907 */
1908 skip += 3;
1909 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1910 flags, skip, preempt_count(), NULL);
03889384
SR
1911}
1912
91e86e56
SR
1913static DEFINE_PER_CPU(int, user_stack_count);
1914
e77405ad
SR
1915void
1916ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1917{
2425bcb9 1918 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1919 struct ring_buffer_event *event;
02b67518
TE
1920 struct userstack_entry *entry;
1921 struct stack_trace trace;
02b67518 1922
983f938a 1923 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
1924 return;
1925
b6345879
SR
1926 /*
1927 * NMIs can not handle page faults, even with fix ups.
1928 * The save user stack can (and often does) fault.
1929 */
1930 if (unlikely(in_nmi()))
1931 return;
02b67518 1932
91e86e56
SR
1933 /*
1934 * prevent recursion, since the user stack tracing may
1935 * trigger other kernel events.
1936 */
1937 preempt_disable();
1938 if (__this_cpu_read(user_stack_count))
1939 goto out;
1940
1941 __this_cpu_inc(user_stack_count);
1942
e77405ad 1943 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1944 sizeof(*entry), flags, pc);
02b67518 1945 if (!event)
1dbd1951 1946 goto out_drop_count;
02b67518 1947 entry = ring_buffer_event_data(event);
02b67518 1948
48659d31 1949 entry->tgid = current->tgid;
02b67518
TE
1950 memset(&entry->caller, 0, sizeof(entry->caller));
1951
1952 trace.nr_entries = 0;
1953 trace.max_entries = FTRACE_STACK_ENTRIES;
1954 trace.skip = 0;
1955 trace.entries = entry->caller;
1956
1957 save_stack_trace_user(&trace);
f306cc82 1958 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1959 __buffer_unlock_commit(buffer, event);
91e86e56 1960
1dbd1951 1961 out_drop_count:
91e86e56 1962 __this_cpu_dec(user_stack_count);
91e86e56
SR
1963 out:
1964 preempt_enable();
02b67518
TE
1965}
1966
4fd27358
HE
1967#ifdef UNUSED
1968static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1969{
7be42151 1970 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1971}
4fd27358 1972#endif /* UNUSED */
02b67518 1973
c0a0d0d3
FW
1974#endif /* CONFIG_STACKTRACE */
1975
07d777fe
SR
1976/* created for use with alloc_percpu */
1977struct trace_buffer_struct {
1978 char buffer[TRACE_BUF_SIZE];
1979};
1980
1981static struct trace_buffer_struct *trace_percpu_buffer;
1982static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1983static struct trace_buffer_struct *trace_percpu_irq_buffer;
1984static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1985
1986/*
1987 * The buffer used is dependent on the context. There is a per cpu
1988 * buffer for normal context, softirq contex, hard irq context and
1989 * for NMI context. Thise allows for lockless recording.
1990 *
1991 * Note, if the buffers failed to be allocated, then this returns NULL
1992 */
1993static char *get_trace_buf(void)
1994{
1995 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1996
1997 /*
1998 * If we have allocated per cpu buffers, then we do not
1999 * need to do any locking.
2000 */
2001 if (in_nmi())
2002 percpu_buffer = trace_percpu_nmi_buffer;
2003 else if (in_irq())
2004 percpu_buffer = trace_percpu_irq_buffer;
2005 else if (in_softirq())
2006 percpu_buffer = trace_percpu_sirq_buffer;
2007 else
2008 percpu_buffer = trace_percpu_buffer;
2009
2010 if (!percpu_buffer)
2011 return NULL;
2012
d8a0349c 2013 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2014}
2015
2016static int alloc_percpu_trace_buffer(void)
2017{
2018 struct trace_buffer_struct *buffers;
2019 struct trace_buffer_struct *sirq_buffers;
2020 struct trace_buffer_struct *irq_buffers;
2021 struct trace_buffer_struct *nmi_buffers;
2022
2023 buffers = alloc_percpu(struct trace_buffer_struct);
2024 if (!buffers)
2025 goto err_warn;
2026
2027 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2028 if (!sirq_buffers)
2029 goto err_sirq;
2030
2031 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2032 if (!irq_buffers)
2033 goto err_irq;
2034
2035 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2036 if (!nmi_buffers)
2037 goto err_nmi;
2038
2039 trace_percpu_buffer = buffers;
2040 trace_percpu_sirq_buffer = sirq_buffers;
2041 trace_percpu_irq_buffer = irq_buffers;
2042 trace_percpu_nmi_buffer = nmi_buffers;
2043
2044 return 0;
2045
2046 err_nmi:
2047 free_percpu(irq_buffers);
2048 err_irq:
2049 free_percpu(sirq_buffers);
2050 err_sirq:
2051 free_percpu(buffers);
2052 err_warn:
2053 WARN(1, "Could not allocate percpu trace_printk buffer");
2054 return -ENOMEM;
2055}
2056
81698831
SR
2057static int buffers_allocated;
2058
07d777fe
SR
2059void trace_printk_init_buffers(void)
2060{
07d777fe
SR
2061 if (buffers_allocated)
2062 return;
2063
2064 if (alloc_percpu_trace_buffer())
2065 return;
2066
2184db46
SR
2067 /* trace_printk() is for debug use only. Don't use it in production. */
2068
69a1c994
BP
2069 pr_warning("\n");
2070 pr_warning("**********************************************************\n");
2184db46
SR
2071 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2072 pr_warning("** **\n");
2073 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2074 pr_warning("** **\n");
2075 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2076 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2077 pr_warning("** **\n");
2078 pr_warning("** If you see this message and you are not debugging **\n");
2079 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2080 pr_warning("** **\n");
2081 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2082 pr_warning("**********************************************************\n");
07d777fe 2083
b382ede6
SR
2084 /* Expand the buffers to set size */
2085 tracing_update_buffers();
2086
07d777fe 2087 buffers_allocated = 1;
81698831
SR
2088
2089 /*
2090 * trace_printk_init_buffers() can be called by modules.
2091 * If that happens, then we need to start cmdline recording
2092 * directly here. If the global_trace.buffer is already
2093 * allocated here, then this was called by module code.
2094 */
12883efb 2095 if (global_trace.trace_buffer.buffer)
81698831
SR
2096 tracing_start_cmdline_record();
2097}
2098
2099void trace_printk_start_comm(void)
2100{
2101 /* Start tracing comms if trace printk is set */
2102 if (!buffers_allocated)
2103 return;
2104 tracing_start_cmdline_record();
2105}
2106
2107static void trace_printk_start_stop_comm(int enabled)
2108{
2109 if (!buffers_allocated)
2110 return;
2111
2112 if (enabled)
2113 tracing_start_cmdline_record();
2114 else
2115 tracing_stop_cmdline_record();
07d777fe
SR
2116}
2117
769b0441 2118/**
48ead020 2119 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2120 *
2121 */
40ce74f1 2122int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2123{
2425bcb9 2124 struct trace_event_call *call = &event_bprint;
769b0441 2125 struct ring_buffer_event *event;
e77405ad 2126 struct ring_buffer *buffer;
769b0441 2127 struct trace_array *tr = &global_trace;
48ead020 2128 struct bprint_entry *entry;
769b0441 2129 unsigned long flags;
07d777fe
SR
2130 char *tbuffer;
2131 int len = 0, size, pc;
769b0441
FW
2132
2133 if (unlikely(tracing_selftest_running || tracing_disabled))
2134 return 0;
2135
2136 /* Don't pollute graph traces with trace_vprintk internals */
2137 pause_graph_tracing();
2138
2139 pc = preempt_count();
5168ae50 2140 preempt_disable_notrace();
769b0441 2141
07d777fe
SR
2142 tbuffer = get_trace_buf();
2143 if (!tbuffer) {
2144 len = 0;
769b0441 2145 goto out;
07d777fe 2146 }
769b0441 2147
07d777fe 2148 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2149
07d777fe
SR
2150 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2151 goto out;
769b0441 2152
07d777fe 2153 local_save_flags(flags);
769b0441 2154 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2155 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2156 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2157 flags, pc);
769b0441 2158 if (!event)
07d777fe 2159 goto out;
769b0441
FW
2160 entry = ring_buffer_event_data(event);
2161 entry->ip = ip;
769b0441
FW
2162 entry->fmt = fmt;
2163
07d777fe 2164 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2165 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2166 __buffer_unlock_commit(buffer, event);
73dddbb5 2167 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
d931369b 2168 }
769b0441 2169
769b0441 2170out:
5168ae50 2171 preempt_enable_notrace();
769b0441
FW
2172 unpause_graph_tracing();
2173
2174 return len;
2175}
48ead020
FW
2176EXPORT_SYMBOL_GPL(trace_vbprintk);
2177
12883efb
SRRH
2178static int
2179__trace_array_vprintk(struct ring_buffer *buffer,
2180 unsigned long ip, const char *fmt, va_list args)
48ead020 2181{
2425bcb9 2182 struct trace_event_call *call = &event_print;
48ead020 2183 struct ring_buffer_event *event;
07d777fe 2184 int len = 0, size, pc;
48ead020 2185 struct print_entry *entry;
07d777fe
SR
2186 unsigned long flags;
2187 char *tbuffer;
48ead020
FW
2188
2189 if (tracing_disabled || tracing_selftest_running)
2190 return 0;
2191
07d777fe
SR
2192 /* Don't pollute graph traces with trace_vprintk internals */
2193 pause_graph_tracing();
2194
48ead020
FW
2195 pc = preempt_count();
2196 preempt_disable_notrace();
48ead020 2197
07d777fe
SR
2198
2199 tbuffer = get_trace_buf();
2200 if (!tbuffer) {
2201 len = 0;
48ead020 2202 goto out;
07d777fe 2203 }
48ead020 2204
3558a5ac 2205 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2206
07d777fe 2207 local_save_flags(flags);
48ead020 2208 size = sizeof(*entry) + len + 1;
e77405ad 2209 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2210 flags, pc);
48ead020 2211 if (!event)
07d777fe 2212 goto out;
48ead020 2213 entry = ring_buffer_event_data(event);
c13d2f7c 2214 entry->ip = ip;
48ead020 2215
3558a5ac 2216 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2217 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2218 __buffer_unlock_commit(buffer, event);
73dddbb5 2219 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
d931369b 2220 }
48ead020
FW
2221 out:
2222 preempt_enable_notrace();
07d777fe 2223 unpause_graph_tracing();
48ead020
FW
2224
2225 return len;
2226}
659372d3 2227
12883efb
SRRH
2228int trace_array_vprintk(struct trace_array *tr,
2229 unsigned long ip, const char *fmt, va_list args)
2230{
2231 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2232}
2233
2234int trace_array_printk(struct trace_array *tr,
2235 unsigned long ip, const char *fmt, ...)
2236{
2237 int ret;
2238 va_list ap;
2239
983f938a 2240 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2241 return 0;
2242
2243 va_start(ap, fmt);
2244 ret = trace_array_vprintk(tr, ip, fmt, ap);
2245 va_end(ap);
2246 return ret;
2247}
2248
2249int trace_array_printk_buf(struct ring_buffer *buffer,
2250 unsigned long ip, const char *fmt, ...)
2251{
2252 int ret;
2253 va_list ap;
2254
983f938a 2255 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2256 return 0;
2257
2258 va_start(ap, fmt);
2259 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2260 va_end(ap);
2261 return ret;
2262}
2263
659372d3
SR
2264int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2265{
a813a159 2266 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2267}
769b0441
FW
2268EXPORT_SYMBOL_GPL(trace_vprintk);
2269
e2ac8ef5 2270static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2271{
6d158a81
SR
2272 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2273
5a90f577 2274 iter->idx++;
6d158a81
SR
2275 if (buf_iter)
2276 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2277}
2278
e309b41d 2279static struct trace_entry *
bc21b478
SR
2280peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2281 unsigned long *lost_events)
dd0e545f 2282{
3928a8a2 2283 struct ring_buffer_event *event;
6d158a81 2284 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2285
d769041f
SR
2286 if (buf_iter)
2287 event = ring_buffer_iter_peek(buf_iter, ts);
2288 else
12883efb 2289 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2290 lost_events);
d769041f 2291
4a9bd3f1
SR
2292 if (event) {
2293 iter->ent_size = ring_buffer_event_length(event);
2294 return ring_buffer_event_data(event);
2295 }
2296 iter->ent_size = 0;
2297 return NULL;
dd0e545f 2298}
d769041f 2299
dd0e545f 2300static struct trace_entry *
bc21b478
SR
2301__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2302 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2303{
12883efb 2304 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2305 struct trace_entry *ent, *next = NULL;
aa27497c 2306 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2307 int cpu_file = iter->cpu_file;
3928a8a2 2308 u64 next_ts = 0, ts;
bc0c38d1 2309 int next_cpu = -1;
12b5da34 2310 int next_size = 0;
bc0c38d1
SR
2311 int cpu;
2312
b04cc6b1
FW
2313 /*
2314 * If we are in a per_cpu trace file, don't bother by iterating over
2315 * all cpu and peek directly.
2316 */
ae3b5093 2317 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2318 if (ring_buffer_empty_cpu(buffer, cpu_file))
2319 return NULL;
bc21b478 2320 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2321 if (ent_cpu)
2322 *ent_cpu = cpu_file;
2323
2324 return ent;
2325 }
2326
ab46428c 2327 for_each_tracing_cpu(cpu) {
dd0e545f 2328
3928a8a2
SR
2329 if (ring_buffer_empty_cpu(buffer, cpu))
2330 continue;
dd0e545f 2331
bc21b478 2332 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2333
cdd31cd2
IM
2334 /*
2335 * Pick the entry with the smallest timestamp:
2336 */
3928a8a2 2337 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2338 next = ent;
2339 next_cpu = cpu;
3928a8a2 2340 next_ts = ts;
bc21b478 2341 next_lost = lost_events;
12b5da34 2342 next_size = iter->ent_size;
bc0c38d1
SR
2343 }
2344 }
2345
12b5da34
SR
2346 iter->ent_size = next_size;
2347
bc0c38d1
SR
2348 if (ent_cpu)
2349 *ent_cpu = next_cpu;
2350
3928a8a2
SR
2351 if (ent_ts)
2352 *ent_ts = next_ts;
2353
bc21b478
SR
2354 if (missing_events)
2355 *missing_events = next_lost;
2356
bc0c38d1
SR
2357 return next;
2358}
2359
dd0e545f 2360/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2361struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2362 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2363{
bc21b478 2364 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2365}
2366
2367/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2368void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2369{
bc21b478
SR
2370 iter->ent = __find_next_entry(iter, &iter->cpu,
2371 &iter->lost_events, &iter->ts);
dd0e545f 2372
3928a8a2 2373 if (iter->ent)
e2ac8ef5 2374 trace_iterator_increment(iter);
dd0e545f 2375
3928a8a2 2376 return iter->ent ? iter : NULL;
b3806b43 2377}
bc0c38d1 2378
e309b41d 2379static void trace_consume(struct trace_iterator *iter)
b3806b43 2380{
12883efb 2381 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2382 &iter->lost_events);
bc0c38d1
SR
2383}
2384
e309b41d 2385static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2386{
2387 struct trace_iterator *iter = m->private;
bc0c38d1 2388 int i = (int)*pos;
4e3c3333 2389 void *ent;
bc0c38d1 2390
a63ce5b3
SR
2391 WARN_ON_ONCE(iter->leftover);
2392
bc0c38d1
SR
2393 (*pos)++;
2394
2395 /* can't go backwards */
2396 if (iter->idx > i)
2397 return NULL;
2398
2399 if (iter->idx < 0)
955b61e5 2400 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2401 else
2402 ent = iter;
2403
2404 while (ent && iter->idx < i)
955b61e5 2405 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2406
2407 iter->pos = *pos;
2408
bc0c38d1
SR
2409 return ent;
2410}
2411
955b61e5 2412void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2413{
2f26ebd5
SR
2414 struct ring_buffer_event *event;
2415 struct ring_buffer_iter *buf_iter;
2416 unsigned long entries = 0;
2417 u64 ts;
2418
12883efb 2419 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2420
6d158a81
SR
2421 buf_iter = trace_buffer_iter(iter, cpu);
2422 if (!buf_iter)
2f26ebd5
SR
2423 return;
2424
2f26ebd5
SR
2425 ring_buffer_iter_reset(buf_iter);
2426
2427 /*
2428 * We could have the case with the max latency tracers
2429 * that a reset never took place on a cpu. This is evident
2430 * by the timestamp being before the start of the buffer.
2431 */
2432 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2433 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2434 break;
2435 entries++;
2436 ring_buffer_read(buf_iter, NULL);
2437 }
2438
12883efb 2439 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2440}
2441
d7350c3f 2442/*
d7350c3f
FW
2443 * The current tracer is copied to avoid a global locking
2444 * all around.
2445 */
bc0c38d1
SR
2446static void *s_start(struct seq_file *m, loff_t *pos)
2447{
2448 struct trace_iterator *iter = m->private;
2b6080f2 2449 struct trace_array *tr = iter->tr;
b04cc6b1 2450 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2451 void *p = NULL;
2452 loff_t l = 0;
3928a8a2 2453 int cpu;
bc0c38d1 2454
2fd196ec
HT
2455 /*
2456 * copy the tracer to avoid using a global lock all around.
2457 * iter->trace is a copy of current_trace, the pointer to the
2458 * name may be used instead of a strcmp(), as iter->trace->name
2459 * will point to the same string as current_trace->name.
2460 */
bc0c38d1 2461 mutex_lock(&trace_types_lock);
2b6080f2
SR
2462 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2463 *iter->trace = *tr->current_trace;
d7350c3f 2464 mutex_unlock(&trace_types_lock);
bc0c38d1 2465
12883efb 2466#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2467 if (iter->snapshot && iter->trace->use_max_tr)
2468 return ERR_PTR(-EBUSY);
12883efb 2469#endif
debdd57f
HT
2470
2471 if (!iter->snapshot)
2472 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2473
bc0c38d1
SR
2474 if (*pos != iter->pos) {
2475 iter->ent = NULL;
2476 iter->cpu = 0;
2477 iter->idx = -1;
2478
ae3b5093 2479 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2480 for_each_tracing_cpu(cpu)
2f26ebd5 2481 tracing_iter_reset(iter, cpu);
b04cc6b1 2482 } else
2f26ebd5 2483 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2484
ac91d854 2485 iter->leftover = 0;
bc0c38d1
SR
2486 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2487 ;
2488
2489 } else {
a63ce5b3
SR
2490 /*
2491 * If we overflowed the seq_file before, then we want
2492 * to just reuse the trace_seq buffer again.
2493 */
2494 if (iter->leftover)
2495 p = iter;
2496 else {
2497 l = *pos - 1;
2498 p = s_next(m, p, &l);
2499 }
bc0c38d1
SR
2500 }
2501
4f535968 2502 trace_event_read_lock();
7e53bd42 2503 trace_access_lock(cpu_file);
bc0c38d1
SR
2504 return p;
2505}
2506
2507static void s_stop(struct seq_file *m, void *p)
2508{
7e53bd42
LJ
2509 struct trace_iterator *iter = m->private;
2510
12883efb 2511#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2512 if (iter->snapshot && iter->trace->use_max_tr)
2513 return;
12883efb 2514#endif
debdd57f
HT
2515
2516 if (!iter->snapshot)
2517 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2518
7e53bd42 2519 trace_access_unlock(iter->cpu_file);
4f535968 2520 trace_event_read_unlock();
bc0c38d1
SR
2521}
2522
39eaf7ef 2523static void
12883efb
SRRH
2524get_total_entries(struct trace_buffer *buf,
2525 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2526{
2527 unsigned long count;
2528 int cpu;
2529
2530 *total = 0;
2531 *entries = 0;
2532
2533 for_each_tracing_cpu(cpu) {
12883efb 2534 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2535 /*
2536 * If this buffer has skipped entries, then we hold all
2537 * entries for the trace and we need to ignore the
2538 * ones before the time stamp.
2539 */
12883efb
SRRH
2540 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2541 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2542 /* total is the same as the entries */
2543 *total += count;
2544 } else
2545 *total += count +
12883efb 2546 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2547 *entries += count;
2548 }
2549}
2550
e309b41d 2551static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2552{
d79ac28f
RV
2553 seq_puts(m, "# _------=> CPU# \n"
2554 "# / _-----=> irqs-off \n"
2555 "# | / _----=> need-resched \n"
2556 "# || / _---=> hardirq/softirq \n"
2557 "# ||| / _--=> preempt-depth \n"
2558 "# |||| / delay \n"
2559 "# cmd pid ||||| time | caller \n"
2560 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2561}
2562
12883efb 2563static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2564{
39eaf7ef
SR
2565 unsigned long total;
2566 unsigned long entries;
2567
12883efb 2568 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2569 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2570 entries, total, num_online_cpus());
2571 seq_puts(m, "#\n");
2572}
2573
12883efb 2574static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2575{
12883efb 2576 print_event_info(buf, m);
d79ac28f
RV
2577 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2578 "# | | | | |\n");
bc0c38d1
SR
2579}
2580
12883efb 2581static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2582{
12883efb 2583 print_event_info(buf, m);
d79ac28f
RV
2584 seq_puts(m, "# _-----=> irqs-off\n"
2585 "# / _----=> need-resched\n"
2586 "# | / _---=> hardirq/softirq\n"
2587 "# || / _--=> preempt-depth\n"
2588 "# ||| / delay\n"
2589 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2590 "# | | | |||| | |\n");
77271ce4 2591}
bc0c38d1 2592
62b915f1 2593void
bc0c38d1
SR
2594print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2595{
983f938a 2596 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2597 struct trace_buffer *buf = iter->trace_buffer;
2598 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2599 struct tracer *type = iter->trace;
39eaf7ef
SR
2600 unsigned long entries;
2601 unsigned long total;
bc0c38d1
SR
2602 const char *name = "preemption";
2603
d840f718 2604 name = type->name;
bc0c38d1 2605
12883efb 2606 get_total_entries(buf, &total, &entries);
bc0c38d1 2607
888b55dc 2608 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2609 name, UTS_RELEASE);
888b55dc 2610 seq_puts(m, "# -----------------------------------"
bc0c38d1 2611 "---------------------------------\n");
888b55dc 2612 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2613 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2614 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2615 entries,
4c11d7ae 2616 total,
12883efb 2617 buf->cpu,
bc0c38d1
SR
2618#if defined(CONFIG_PREEMPT_NONE)
2619 "server",
2620#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2621 "desktop",
b5c21b45 2622#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2623 "preempt",
2624#else
2625 "unknown",
2626#endif
2627 /* These are reserved for later use */
2628 0, 0, 0, 0);
2629#ifdef CONFIG_SMP
2630 seq_printf(m, " #P:%d)\n", num_online_cpus());
2631#else
2632 seq_puts(m, ")\n");
2633#endif
888b55dc
KM
2634 seq_puts(m, "# -----------------\n");
2635 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2636 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2637 data->comm, data->pid,
2638 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2639 data->policy, data->rt_priority);
888b55dc 2640 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2641
2642 if (data->critical_start) {
888b55dc 2643 seq_puts(m, "# => started at: ");
214023c3
SR
2644 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2645 trace_print_seq(m, &iter->seq);
888b55dc 2646 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2647 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2648 trace_print_seq(m, &iter->seq);
8248ac05 2649 seq_puts(m, "\n#\n");
bc0c38d1
SR
2650 }
2651
888b55dc 2652 seq_puts(m, "#\n");
bc0c38d1
SR
2653}
2654
a309720c
SR
2655static void test_cpu_buff_start(struct trace_iterator *iter)
2656{
2657 struct trace_seq *s = &iter->seq;
983f938a 2658 struct trace_array *tr = iter->tr;
a309720c 2659
983f938a 2660 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
2661 return;
2662
2663 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2664 return;
2665
4462344e 2666 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2667 return;
2668
12883efb 2669 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2670 return;
2671
4462344e 2672 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2673
2674 /* Don't print started cpu buffer for the first entry of the trace */
2675 if (iter->idx > 1)
2676 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2677 iter->cpu);
a309720c
SR
2678}
2679
2c4f035f 2680static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2681{
983f938a 2682 struct trace_array *tr = iter->tr;
214023c3 2683 struct trace_seq *s = &iter->seq;
983f938a 2684 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2685 struct trace_entry *entry;
f633cef0 2686 struct trace_event *event;
bc0c38d1 2687
4e3c3333 2688 entry = iter->ent;
dd0e545f 2689
a309720c
SR
2690 test_cpu_buff_start(iter);
2691
c4a8e8be 2692 event = ftrace_find_event(entry->type);
bc0c38d1 2693
983f938a 2694 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2695 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2696 trace_print_lat_context(iter);
2697 else
2698 trace_print_context(iter);
c4a8e8be 2699 }
bc0c38d1 2700
19a7fe20
SRRH
2701 if (trace_seq_has_overflowed(s))
2702 return TRACE_TYPE_PARTIAL_LINE;
2703
268ccda0 2704 if (event)
a9a57763 2705 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2706
19a7fe20 2707 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2708
19a7fe20 2709 return trace_handle_return(s);
bc0c38d1
SR
2710}
2711
2c4f035f 2712static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 2713{
983f938a 2714 struct trace_array *tr = iter->tr;
f9896bf3
IM
2715 struct trace_seq *s = &iter->seq;
2716 struct trace_entry *entry;
f633cef0 2717 struct trace_event *event;
f9896bf3
IM
2718
2719 entry = iter->ent;
dd0e545f 2720
983f938a 2721 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
2722 trace_seq_printf(s, "%d %d %llu ",
2723 entry->pid, iter->cpu, iter->ts);
2724
2725 if (trace_seq_has_overflowed(s))
2726 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2727
f633cef0 2728 event = ftrace_find_event(entry->type);
268ccda0 2729 if (event)
a9a57763 2730 return event->funcs->raw(iter, 0, event);
d9793bd8 2731
19a7fe20 2732 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2733
19a7fe20 2734 return trace_handle_return(s);
f9896bf3
IM
2735}
2736
2c4f035f 2737static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 2738{
983f938a 2739 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
2740 struct trace_seq *s = &iter->seq;
2741 unsigned char newline = '\n';
2742 struct trace_entry *entry;
f633cef0 2743 struct trace_event *event;
5e3ca0ec
IM
2744
2745 entry = iter->ent;
dd0e545f 2746
983f938a 2747 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2748 SEQ_PUT_HEX_FIELD(s, entry->pid);
2749 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2750 SEQ_PUT_HEX_FIELD(s, iter->ts);
2751 if (trace_seq_has_overflowed(s))
2752 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2753 }
5e3ca0ec 2754
f633cef0 2755 event = ftrace_find_event(entry->type);
268ccda0 2756 if (event) {
a9a57763 2757 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2758 if (ret != TRACE_TYPE_HANDLED)
2759 return ret;
2760 }
7104f300 2761
19a7fe20 2762 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2763
19a7fe20 2764 return trace_handle_return(s);
5e3ca0ec
IM
2765}
2766
2c4f035f 2767static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 2768{
983f938a 2769 struct trace_array *tr = iter->tr;
cb0f12aa
IM
2770 struct trace_seq *s = &iter->seq;
2771 struct trace_entry *entry;
f633cef0 2772 struct trace_event *event;
cb0f12aa
IM
2773
2774 entry = iter->ent;
dd0e545f 2775
983f938a 2776 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2777 SEQ_PUT_FIELD(s, entry->pid);
2778 SEQ_PUT_FIELD(s, iter->cpu);
2779 SEQ_PUT_FIELD(s, iter->ts);
2780 if (trace_seq_has_overflowed(s))
2781 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2782 }
cb0f12aa 2783
f633cef0 2784 event = ftrace_find_event(entry->type);
a9a57763
SR
2785 return event ? event->funcs->binary(iter, 0, event) :
2786 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2787}
2788
62b915f1 2789int trace_empty(struct trace_iterator *iter)
bc0c38d1 2790{
6d158a81 2791 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2792 int cpu;
2793
9aba60fe 2794 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2795 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2796 cpu = iter->cpu_file;
6d158a81
SR
2797 buf_iter = trace_buffer_iter(iter, cpu);
2798 if (buf_iter) {
2799 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2800 return 0;
2801 } else {
12883efb 2802 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2803 return 0;
2804 }
2805 return 1;
2806 }
2807
ab46428c 2808 for_each_tracing_cpu(cpu) {
6d158a81
SR
2809 buf_iter = trace_buffer_iter(iter, cpu);
2810 if (buf_iter) {
2811 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2812 return 0;
2813 } else {
12883efb 2814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2815 return 0;
2816 }
bc0c38d1 2817 }
d769041f 2818
797d3712 2819 return 1;
bc0c38d1
SR
2820}
2821
4f535968 2822/* Called with trace_event_read_lock() held. */
955b61e5 2823enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2824{
983f938a
SRRH
2825 struct trace_array *tr = iter->tr;
2826 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
2827 enum print_line_t ret;
2828
19a7fe20
SRRH
2829 if (iter->lost_events) {
2830 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2831 iter->cpu, iter->lost_events);
2832 if (trace_seq_has_overflowed(&iter->seq))
2833 return TRACE_TYPE_PARTIAL_LINE;
2834 }
bc21b478 2835
2c4f035f
FW
2836 if (iter->trace && iter->trace->print_line) {
2837 ret = iter->trace->print_line(iter);
2838 if (ret != TRACE_TYPE_UNHANDLED)
2839 return ret;
2840 }
72829bc3 2841
09ae7234
SRRH
2842 if (iter->ent->type == TRACE_BPUTS &&
2843 trace_flags & TRACE_ITER_PRINTK &&
2844 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2845 return trace_print_bputs_msg_only(iter);
2846
48ead020
FW
2847 if (iter->ent->type == TRACE_BPRINT &&
2848 trace_flags & TRACE_ITER_PRINTK &&
2849 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2850 return trace_print_bprintk_msg_only(iter);
48ead020 2851
66896a85
FW
2852 if (iter->ent->type == TRACE_PRINT &&
2853 trace_flags & TRACE_ITER_PRINTK &&
2854 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2855 return trace_print_printk_msg_only(iter);
66896a85 2856
cb0f12aa
IM
2857 if (trace_flags & TRACE_ITER_BIN)
2858 return print_bin_fmt(iter);
2859
5e3ca0ec
IM
2860 if (trace_flags & TRACE_ITER_HEX)
2861 return print_hex_fmt(iter);
2862
f9896bf3
IM
2863 if (trace_flags & TRACE_ITER_RAW)
2864 return print_raw_fmt(iter);
2865
f9896bf3
IM
2866 return print_trace_fmt(iter);
2867}
2868
7e9a49ef
JO
2869void trace_latency_header(struct seq_file *m)
2870{
2871 struct trace_iterator *iter = m->private;
983f938a 2872 struct trace_array *tr = iter->tr;
7e9a49ef
JO
2873
2874 /* print nothing if the buffers are empty */
2875 if (trace_empty(iter))
2876 return;
2877
2878 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2879 print_trace_header(m, iter);
2880
983f938a 2881 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
2882 print_lat_help_header(m);
2883}
2884
62b915f1
JO
2885void trace_default_header(struct seq_file *m)
2886{
2887 struct trace_iterator *iter = m->private;
983f938a
SRRH
2888 struct trace_array *tr = iter->tr;
2889 unsigned long trace_flags = tr->trace_flags;
62b915f1 2890
f56e7f8e
JO
2891 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2892 return;
2893
62b915f1
JO
2894 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2895 /* print nothing if the buffers are empty */
2896 if (trace_empty(iter))
2897 return;
2898 print_trace_header(m, iter);
2899 if (!(trace_flags & TRACE_ITER_VERBOSE))
2900 print_lat_help_header(m);
2901 } else {
77271ce4
SR
2902 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2903 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2904 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2905 else
12883efb 2906 print_func_help_header(iter->trace_buffer, m);
77271ce4 2907 }
62b915f1
JO
2908 }
2909}
2910
e0a413f6
SR
2911static void test_ftrace_alive(struct seq_file *m)
2912{
2913 if (!ftrace_is_dead())
2914 return;
d79ac28f
RV
2915 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2916 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2917}
2918
d8741e2e 2919#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2920static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2921{
d79ac28f
RV
2922 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2923 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2924 "# Takes a snapshot of the main buffer.\n"
2925 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2926 "# (Doesn't have to be '2' works with any number that\n"
2927 "# is not a '0' or '1')\n");
d8741e2e 2928}
f1affcaa
SRRH
2929
2930static void show_snapshot_percpu_help(struct seq_file *m)
2931{
fa6f0cc7 2932 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2933#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2934 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2935 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2936#else
d79ac28f
RV
2937 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2938 "# Must use main snapshot file to allocate.\n");
f1affcaa 2939#endif
d79ac28f
RV
2940 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2941 "# (Doesn't have to be '2' works with any number that\n"
2942 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2943}
2944
d8741e2e
SRRH
2945static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2946{
45ad21ca 2947 if (iter->tr->allocated_snapshot)
fa6f0cc7 2948 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2949 else
fa6f0cc7 2950 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2951
fa6f0cc7 2952 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2953 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2954 show_snapshot_main_help(m);
2955 else
2956 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2957}
2958#else
2959/* Should never be called */
2960static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2961#endif
2962
bc0c38d1
SR
2963static int s_show(struct seq_file *m, void *v)
2964{
2965 struct trace_iterator *iter = v;
a63ce5b3 2966 int ret;
bc0c38d1
SR
2967
2968 if (iter->ent == NULL) {
2969 if (iter->tr) {
2970 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2971 seq_puts(m, "#\n");
e0a413f6 2972 test_ftrace_alive(m);
bc0c38d1 2973 }
d8741e2e
SRRH
2974 if (iter->snapshot && trace_empty(iter))
2975 print_snapshot_help(m, iter);
2976 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2977 iter->trace->print_header(m);
62b915f1
JO
2978 else
2979 trace_default_header(m);
2980
a63ce5b3
SR
2981 } else if (iter->leftover) {
2982 /*
2983 * If we filled the seq_file buffer earlier, we
2984 * want to just show it now.
2985 */
2986 ret = trace_print_seq(m, &iter->seq);
2987
2988 /* ret should this time be zero, but you never know */
2989 iter->leftover = ret;
2990
bc0c38d1 2991 } else {
f9896bf3 2992 print_trace_line(iter);
a63ce5b3
SR
2993 ret = trace_print_seq(m, &iter->seq);
2994 /*
2995 * If we overflow the seq_file buffer, then it will
2996 * ask us for this data again at start up.
2997 * Use that instead.
2998 * ret is 0 if seq_file write succeeded.
2999 * -1 otherwise.
3000 */
3001 iter->leftover = ret;
bc0c38d1
SR
3002 }
3003
3004 return 0;
3005}
3006
649e9c70
ON
3007/*
3008 * Should be used after trace_array_get(), trace_types_lock
3009 * ensures that i_cdev was already initialized.
3010 */
3011static inline int tracing_get_cpu(struct inode *inode)
3012{
3013 if (inode->i_cdev) /* See trace_create_cpu_file() */
3014 return (long)inode->i_cdev - 1;
3015 return RING_BUFFER_ALL_CPUS;
3016}
3017
88e9d34c 3018static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3019 .start = s_start,
3020 .next = s_next,
3021 .stop = s_stop,
3022 .show = s_show,
bc0c38d1
SR
3023};
3024
e309b41d 3025static struct trace_iterator *
6484c71c 3026__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3027{
6484c71c 3028 struct trace_array *tr = inode->i_private;
bc0c38d1 3029 struct trace_iterator *iter;
50e18b94 3030 int cpu;
bc0c38d1 3031
85a2f9b4
SR
3032 if (tracing_disabled)
3033 return ERR_PTR(-ENODEV);
60a11774 3034
50e18b94 3035 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3036 if (!iter)
3037 return ERR_PTR(-ENOMEM);
bc0c38d1 3038
72917235 3039 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3040 GFP_KERNEL);
93574fcc
DC
3041 if (!iter->buffer_iter)
3042 goto release;
3043
d7350c3f
FW
3044 /*
3045 * We make a copy of the current tracer to avoid concurrent
3046 * changes on it while we are reading.
3047 */
bc0c38d1 3048 mutex_lock(&trace_types_lock);
d7350c3f 3049 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3050 if (!iter->trace)
d7350c3f 3051 goto fail;
85a2f9b4 3052
2b6080f2 3053 *iter->trace = *tr->current_trace;
d7350c3f 3054
79f55997 3055 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3056 goto fail;
3057
12883efb
SRRH
3058 iter->tr = tr;
3059
3060#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3061 /* Currently only the top directory has a snapshot */
3062 if (tr->current_trace->print_max || snapshot)
12883efb 3063 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3064 else
12883efb
SRRH
3065#endif
3066 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3067 iter->snapshot = snapshot;
bc0c38d1 3068 iter->pos = -1;
6484c71c 3069 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3070 mutex_init(&iter->mutex);
bc0c38d1 3071
8bba1bf5
MM
3072 /* Notify the tracer early; before we stop tracing. */
3073 if (iter->trace && iter->trace->open)
a93751ca 3074 iter->trace->open(iter);
8bba1bf5 3075
12ef7d44 3076 /* Annotate start of buffers if we had overruns */
12883efb 3077 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3078 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3079
8be0709f 3080 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3081 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3082 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3083
debdd57f
HT
3084 /* stop the trace while dumping if we are not opening "snapshot" */
3085 if (!iter->snapshot)
2b6080f2 3086 tracing_stop_tr(tr);
2f26ebd5 3087
ae3b5093 3088 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3089 for_each_tracing_cpu(cpu) {
b04cc6b1 3090 iter->buffer_iter[cpu] =
12883efb 3091 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3092 }
3093 ring_buffer_read_prepare_sync();
3094 for_each_tracing_cpu(cpu) {
3095 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3096 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3097 }
3098 } else {
3099 cpu = iter->cpu_file;
3928a8a2 3100 iter->buffer_iter[cpu] =
12883efb 3101 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3102 ring_buffer_read_prepare_sync();
3103 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3104 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3105 }
3106
bc0c38d1
SR
3107 mutex_unlock(&trace_types_lock);
3108
bc0c38d1 3109 return iter;
3928a8a2 3110
d7350c3f 3111 fail:
3928a8a2 3112 mutex_unlock(&trace_types_lock);
d7350c3f 3113 kfree(iter->trace);
6d158a81 3114 kfree(iter->buffer_iter);
93574fcc 3115release:
50e18b94
JO
3116 seq_release_private(inode, file);
3117 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3118}
3119
3120int tracing_open_generic(struct inode *inode, struct file *filp)
3121{
60a11774
SR
3122 if (tracing_disabled)
3123 return -ENODEV;
3124
bc0c38d1
SR
3125 filp->private_data = inode->i_private;
3126 return 0;
3127}
3128
2e86421d
GB
3129bool tracing_is_disabled(void)
3130{
3131 return (tracing_disabled) ? true: false;
3132}
3133
7b85af63
SRRH
3134/*
3135 * Open and update trace_array ref count.
3136 * Must have the current trace_array passed to it.
3137 */
dcc30223 3138static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3139{
3140 struct trace_array *tr = inode->i_private;
3141
3142 if (tracing_disabled)
3143 return -ENODEV;
3144
3145 if (trace_array_get(tr) < 0)
3146 return -ENODEV;
3147
3148 filp->private_data = inode->i_private;
3149
3150 return 0;
7b85af63
SRRH
3151}
3152
4fd27358 3153static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3154{
6484c71c 3155 struct trace_array *tr = inode->i_private;
907f2784 3156 struct seq_file *m = file->private_data;
4acd4d00 3157 struct trace_iterator *iter;
3928a8a2 3158 int cpu;
bc0c38d1 3159
ff451961 3160 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3161 trace_array_put(tr);
4acd4d00 3162 return 0;
ff451961 3163 }
4acd4d00 3164
6484c71c 3165 /* Writes do not use seq_file */
4acd4d00 3166 iter = m->private;
bc0c38d1 3167 mutex_lock(&trace_types_lock);
a695cb58 3168
3928a8a2
SR
3169 for_each_tracing_cpu(cpu) {
3170 if (iter->buffer_iter[cpu])
3171 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3172 }
3173
bc0c38d1
SR
3174 if (iter->trace && iter->trace->close)
3175 iter->trace->close(iter);
3176
debdd57f
HT
3177 if (!iter->snapshot)
3178 /* reenable tracing if it was previously enabled */
2b6080f2 3179 tracing_start_tr(tr);
f77d09a3
AL
3180
3181 __trace_array_put(tr);
3182
bc0c38d1
SR
3183 mutex_unlock(&trace_types_lock);
3184
d7350c3f 3185 mutex_destroy(&iter->mutex);
b0dfa978 3186 free_cpumask_var(iter->started);
d7350c3f 3187 kfree(iter->trace);
6d158a81 3188 kfree(iter->buffer_iter);
50e18b94 3189 seq_release_private(inode, file);
ff451961 3190
bc0c38d1
SR
3191 return 0;
3192}
3193
7b85af63
SRRH
3194static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3195{
3196 struct trace_array *tr = inode->i_private;
3197
3198 trace_array_put(tr);
bc0c38d1
SR
3199 return 0;
3200}
3201
7b85af63
SRRH
3202static int tracing_single_release_tr(struct inode *inode, struct file *file)
3203{
3204 struct trace_array *tr = inode->i_private;
3205
3206 trace_array_put(tr);
3207
3208 return single_release(inode, file);
3209}
3210
bc0c38d1
SR
3211static int tracing_open(struct inode *inode, struct file *file)
3212{
6484c71c 3213 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3214 struct trace_iterator *iter;
3215 int ret = 0;
bc0c38d1 3216
ff451961
SRRH
3217 if (trace_array_get(tr) < 0)
3218 return -ENODEV;
3219
4acd4d00 3220 /* If this file was open for write, then erase contents */
6484c71c
ON
3221 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3222 int cpu = tracing_get_cpu(inode);
3223
3224 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3225 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3226 else
6484c71c 3227 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3228 }
bc0c38d1 3229
4acd4d00 3230 if (file->f_mode & FMODE_READ) {
6484c71c 3231 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3232 if (IS_ERR(iter))
3233 ret = PTR_ERR(iter);
983f938a 3234 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3235 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3236 }
ff451961
SRRH
3237
3238 if (ret < 0)
3239 trace_array_put(tr);
3240
bc0c38d1
SR
3241 return ret;
3242}
3243
607e2ea1
SRRH
3244/*
3245 * Some tracers are not suitable for instance buffers.
3246 * A tracer is always available for the global array (toplevel)
3247 * or if it explicitly states that it is.
3248 */
3249static bool
3250trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3251{
3252 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3253}
3254
3255/* Find the next tracer that this trace array may use */
3256static struct tracer *
3257get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3258{
3259 while (t && !trace_ok_for_array(t, tr))
3260 t = t->next;
3261
3262 return t;
3263}
3264
e309b41d 3265static void *
bc0c38d1
SR
3266t_next(struct seq_file *m, void *v, loff_t *pos)
3267{
607e2ea1 3268 struct trace_array *tr = m->private;
f129e965 3269 struct tracer *t = v;
bc0c38d1
SR
3270
3271 (*pos)++;
3272
3273 if (t)
607e2ea1 3274 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3275
bc0c38d1
SR
3276 return t;
3277}
3278
3279static void *t_start(struct seq_file *m, loff_t *pos)
3280{
607e2ea1 3281 struct trace_array *tr = m->private;
f129e965 3282 struct tracer *t;
bc0c38d1
SR
3283 loff_t l = 0;
3284
3285 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3286
3287 t = get_tracer_for_array(tr, trace_types);
3288 for (; t && l < *pos; t = t_next(m, t, &l))
3289 ;
bc0c38d1
SR
3290
3291 return t;
3292}
3293
3294static void t_stop(struct seq_file *m, void *p)
3295{
3296 mutex_unlock(&trace_types_lock);
3297}
3298
3299static int t_show(struct seq_file *m, void *v)
3300{
3301 struct tracer *t = v;
3302
3303 if (!t)
3304 return 0;
3305
fa6f0cc7 3306 seq_puts(m, t->name);
bc0c38d1
SR
3307 if (t->next)
3308 seq_putc(m, ' ');
3309 else
3310 seq_putc(m, '\n');
3311
3312 return 0;
3313}
3314
88e9d34c 3315static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3316 .start = t_start,
3317 .next = t_next,
3318 .stop = t_stop,
3319 .show = t_show,
bc0c38d1
SR
3320};
3321
3322static int show_traces_open(struct inode *inode, struct file *file)
3323{
607e2ea1
SRRH
3324 struct trace_array *tr = inode->i_private;
3325 struct seq_file *m;
3326 int ret;
3327
60a11774
SR
3328 if (tracing_disabled)
3329 return -ENODEV;
3330
607e2ea1
SRRH
3331 ret = seq_open(file, &show_traces_seq_ops);
3332 if (ret)
3333 return ret;
3334
3335 m = file->private_data;
3336 m->private = tr;
3337
3338 return 0;
bc0c38d1
SR
3339}
3340
4acd4d00
SR
3341static ssize_t
3342tracing_write_stub(struct file *filp, const char __user *ubuf,
3343 size_t count, loff_t *ppos)
3344{
3345 return count;
3346}
3347
098c879e 3348loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3349{
098c879e
SRRH
3350 int ret;
3351
364829b1 3352 if (file->f_mode & FMODE_READ)
098c879e 3353 ret = seq_lseek(file, offset, whence);
364829b1 3354 else
098c879e
SRRH
3355 file->f_pos = ret = 0;
3356
3357 return ret;
364829b1
SP
3358}
3359
5e2336a0 3360static const struct file_operations tracing_fops = {
4bf39a94
IM
3361 .open = tracing_open,
3362 .read = seq_read,
4acd4d00 3363 .write = tracing_write_stub,
098c879e 3364 .llseek = tracing_lseek,
4bf39a94 3365 .release = tracing_release,
bc0c38d1
SR
3366};
3367
5e2336a0 3368static const struct file_operations show_traces_fops = {
c7078de1
IM
3369 .open = show_traces_open,
3370 .read = seq_read,
3371 .release = seq_release,
b444786f 3372 .llseek = seq_lseek,
c7078de1
IM
3373};
3374
36dfe925
IM
3375/*
3376 * The tracer itself will not take this lock, but still we want
3377 * to provide a consistent cpumask to user-space:
3378 */
3379static DEFINE_MUTEX(tracing_cpumask_update_lock);
3380
3381/*
3382 * Temporary storage for the character representation of the
3383 * CPU bitmask (and one more byte for the newline):
3384 */
3385static char mask_str[NR_CPUS + 1];
3386
c7078de1
IM
3387static ssize_t
3388tracing_cpumask_read(struct file *filp, char __user *ubuf,
3389 size_t count, loff_t *ppos)
3390{
ccfe9e42 3391 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3392 int len;
c7078de1
IM
3393
3394 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3395
1a40243b
TH
3396 len = snprintf(mask_str, count, "%*pb\n",
3397 cpumask_pr_args(tr->tracing_cpumask));
3398 if (len >= count) {
36dfe925
IM
3399 count = -EINVAL;
3400 goto out_err;
3401 }
36dfe925
IM
3402 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3403
3404out_err:
c7078de1
IM
3405 mutex_unlock(&tracing_cpumask_update_lock);
3406
3407 return count;
3408}
3409
3410static ssize_t
3411tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3412 size_t count, loff_t *ppos)
3413{
ccfe9e42 3414 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3415 cpumask_var_t tracing_cpumask_new;
2b6080f2 3416 int err, cpu;
9e01c1b7
RR
3417
3418 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3419 return -ENOMEM;
c7078de1 3420
9e01c1b7 3421 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3422 if (err)
36dfe925
IM
3423 goto err_unlock;
3424
215368e8
LZ
3425 mutex_lock(&tracing_cpumask_update_lock);
3426
a5e25883 3427 local_irq_disable();
0b9b12c1 3428 arch_spin_lock(&tr->max_lock);
ab46428c 3429 for_each_tracing_cpu(cpu) {
36dfe925
IM
3430 /*
3431 * Increase/decrease the disabled counter if we are
3432 * about to flip a bit in the cpumask:
3433 */
ccfe9e42 3434 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3435 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3436 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3437 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3438 }
ccfe9e42 3439 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3440 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3441 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3442 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3443 }
3444 }
0b9b12c1 3445 arch_spin_unlock(&tr->max_lock);
a5e25883 3446 local_irq_enable();
36dfe925 3447
ccfe9e42 3448 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3449
3450 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3451 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3452
3453 return count;
36dfe925
IM
3454
3455err_unlock:
215368e8 3456 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3457
3458 return err;
c7078de1
IM
3459}
3460
5e2336a0 3461static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3462 .open = tracing_open_generic_tr,
c7078de1
IM
3463 .read = tracing_cpumask_read,
3464 .write = tracing_cpumask_write,
ccfe9e42 3465 .release = tracing_release_generic_tr,
b444786f 3466 .llseek = generic_file_llseek,
bc0c38d1
SR
3467};
3468
fdb372ed 3469static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3470{
d8e83d26 3471 struct tracer_opt *trace_opts;
2b6080f2 3472 struct trace_array *tr = m->private;
d8e83d26 3473 u32 tracer_flags;
d8e83d26 3474 int i;
adf9f195 3475
d8e83d26 3476 mutex_lock(&trace_types_lock);
2b6080f2
SR
3477 tracer_flags = tr->current_trace->flags->val;
3478 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3479
bc0c38d1 3480 for (i = 0; trace_options[i]; i++) {
983f938a 3481 if (tr->trace_flags & (1 << i))
fdb372ed 3482 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3483 else
fdb372ed 3484 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3485 }
3486
adf9f195
FW
3487 for (i = 0; trace_opts[i].name; i++) {
3488 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3489 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3490 else
fdb372ed 3491 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3492 }
d8e83d26 3493 mutex_unlock(&trace_types_lock);
adf9f195 3494
fdb372ed 3495 return 0;
bc0c38d1 3496}
bc0c38d1 3497
8c1a49ae 3498static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3499 struct tracer_flags *tracer_flags,
3500 struct tracer_opt *opts, int neg)
3501{
8c1a49ae 3502 struct tracer *trace = tr->current_trace;
8d18eaaf 3503 int ret;
bc0c38d1 3504
8c1a49ae 3505 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3506 if (ret)
3507 return ret;
3508
3509 if (neg)
3510 tracer_flags->val &= ~opts->bit;
3511 else
3512 tracer_flags->val |= opts->bit;
3513 return 0;
bc0c38d1
SR
3514}
3515
adf9f195 3516/* Try to assign a tracer specific option */
8c1a49ae 3517static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3518{
8c1a49ae 3519 struct tracer *trace = tr->current_trace;
7770841e 3520 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3521 struct tracer_opt *opts = NULL;
8d18eaaf 3522 int i;
adf9f195 3523
7770841e
Z
3524 for (i = 0; tracer_flags->opts[i].name; i++) {
3525 opts = &tracer_flags->opts[i];
adf9f195 3526
8d18eaaf 3527 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3528 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3529 }
adf9f195 3530
8d18eaaf 3531 return -EINVAL;
adf9f195
FW
3532}
3533
613f04a0
SRRH
3534/* Some tracers require overwrite to stay enabled */
3535int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3536{
3537 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3538 return -1;
3539
3540 return 0;
3541}
3542
2b6080f2 3543int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3544{
3545 /* do nothing if flag is already set */
983f938a 3546 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
3547 return 0;
3548
3549 /* Give the tracer a chance to approve the change */
2b6080f2 3550 if (tr->current_trace->flag_changed)
bf6065b5 3551 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3552 return -EINVAL;
af4617bd
SR
3553
3554 if (enabled)
983f938a 3555 tr->trace_flags |= mask;
af4617bd 3556 else
983f938a 3557 tr->trace_flags &= ~mask;
e870e9a1
LZ
3558
3559 if (mask == TRACE_ITER_RECORD_CMD)
3560 trace_event_enable_cmd_record(enabled);
750912fa 3561
80902822 3562 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3563 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3564#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3565 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3566#endif
3567 }
81698831 3568
b9f9108c 3569 if (mask == TRACE_ITER_PRINTK) {
81698831 3570 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3571 trace_printk_control(enabled);
3572 }
613f04a0
SRRH
3573
3574 return 0;
af4617bd
SR
3575}
3576
2b6080f2 3577static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3578{
8d18eaaf 3579 char *cmp;
bc0c38d1 3580 int neg = 0;
613f04a0 3581 int ret = -ENODEV;
bc0c38d1
SR
3582 int i;
3583
7bcfaf54 3584 cmp = strstrip(option);
bc0c38d1 3585
8d18eaaf 3586 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3587 neg = 1;
3588 cmp += 2;
3589 }
3590
69d34da2
SRRH
3591 mutex_lock(&trace_types_lock);
3592
bc0c38d1 3593 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3594 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3595 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3596 break;
3597 }
3598 }
adf9f195
FW
3599
3600 /* If no option could be set, test the specific tracer options */
69d34da2 3601 if (!trace_options[i])
8c1a49ae 3602 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3603
3604 mutex_unlock(&trace_types_lock);
bc0c38d1 3605
7bcfaf54
SR
3606 return ret;
3607}
3608
3609static ssize_t
3610tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3611 size_t cnt, loff_t *ppos)
3612{
2b6080f2
SR
3613 struct seq_file *m = filp->private_data;
3614 struct trace_array *tr = m->private;
7bcfaf54 3615 char buf[64];
613f04a0 3616 int ret;
7bcfaf54
SR
3617
3618 if (cnt >= sizeof(buf))
3619 return -EINVAL;
3620
3621 if (copy_from_user(&buf, ubuf, cnt))
3622 return -EFAULT;
3623
a8dd2176
SR
3624 buf[cnt] = 0;
3625
2b6080f2 3626 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3627 if (ret < 0)
3628 return ret;
7bcfaf54 3629
cf8517cf 3630 *ppos += cnt;
bc0c38d1
SR
3631
3632 return cnt;
3633}
3634
fdb372ed
LZ
3635static int tracing_trace_options_open(struct inode *inode, struct file *file)
3636{
7b85af63 3637 struct trace_array *tr = inode->i_private;
f77d09a3 3638 int ret;
7b85af63 3639
fdb372ed
LZ
3640 if (tracing_disabled)
3641 return -ENODEV;
2b6080f2 3642
7b85af63
SRRH
3643 if (trace_array_get(tr) < 0)
3644 return -ENODEV;
3645
f77d09a3
AL
3646 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3647 if (ret < 0)
3648 trace_array_put(tr);
3649
3650 return ret;
fdb372ed
LZ
3651}
3652
5e2336a0 3653static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3654 .open = tracing_trace_options_open,
3655 .read = seq_read,
3656 .llseek = seq_lseek,
7b85af63 3657 .release = tracing_single_release_tr,
ee6bce52 3658 .write = tracing_trace_options_write,
bc0c38d1
SR
3659};
3660
7bd2f24c
IM
3661static const char readme_msg[] =
3662 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3663 "# echo 0 > tracing_on : quick way to disable tracing\n"
3664 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3665 " Important files:\n"
3666 " trace\t\t\t- The static contents of the buffer\n"
3667 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3668 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3669 " current_tracer\t- function and latency tracers\n"
3670 " available_tracers\t- list of configured tracers for current_tracer\n"
3671 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3672 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3673 " trace_clock\t\t-change the clock used to order events\n"
3674 " local: Per cpu clock but may not be synced across CPUs\n"
3675 " global: Synced across CPUs but slows tracing down.\n"
3676 " counter: Not a clock, but just an increment\n"
3677 " uptime: Jiffy counter from time of boot\n"
3678 " perf: Same clock that perf events use\n"
3679#ifdef CONFIG_X86_64
3680 " x86-tsc: TSC cycle counter\n"
3681#endif
3682 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3683 " tracing_cpumask\t- Limit which CPUs to trace\n"
3684 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3685 "\t\t\t Remove sub-buffer with rmdir\n"
3686 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3687 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3688 "\t\t\t option name\n"
939c7a4f 3689 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3690#ifdef CONFIG_DYNAMIC_FTRACE
3691 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3692 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3693 "\t\t\t functions\n"
3694 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3695 "\t modules: Can select a group via module\n"
3696 "\t Format: :mod:<module-name>\n"
3697 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3698 "\t triggers: a command to perform when function is hit\n"
3699 "\t Format: <function>:<trigger>[:count]\n"
3700 "\t trigger: traceon, traceoff\n"
3701 "\t\t enable_event:<system>:<event>\n"
3702 "\t\t disable_event:<system>:<event>\n"
22f45649 3703#ifdef CONFIG_STACKTRACE
71485c45 3704 "\t\t stacktrace\n"
22f45649
SRRH
3705#endif
3706#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3707 "\t\t snapshot\n"
22f45649 3708#endif
17a280ea
SRRH
3709 "\t\t dump\n"
3710 "\t\t cpudump\n"
71485c45
SRRH
3711 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3712 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3713 "\t The first one will disable tracing every time do_fault is hit\n"
3714 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3715 "\t The first time do trap is hit and it disables tracing, the\n"
3716 "\t counter will decrement to 2. If tracing is already disabled,\n"
3717 "\t the counter will not decrement. It only decrements when the\n"
3718 "\t trigger did work\n"
3719 "\t To remove trigger without count:\n"
3720 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3721 "\t To remove trigger with a count:\n"
3722 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3723 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3724 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3725 "\t modules: Can select a group via module command :mod:\n"
3726 "\t Does not accept triggers\n"
22f45649
SRRH
3727#endif /* CONFIG_DYNAMIC_FTRACE */
3728#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3729 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3730 "\t\t (function)\n"
22f45649
SRRH
3731#endif
3732#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3733 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3734 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3735 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3736#endif
3737#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3738 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3739 "\t\t\t snapshot buffer. Read the contents for more\n"
3740 "\t\t\t information\n"
22f45649 3741#endif
991821c8 3742#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3743 " stack_trace\t\t- Shows the max stack trace when active\n"
3744 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3745 "\t\t\t Write into this file to reset the max size (trigger a\n"
3746 "\t\t\t new trace)\n"
22f45649 3747#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3748 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3749 "\t\t\t traces\n"
22f45649 3750#endif
991821c8 3751#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3752 " events/\t\t- Directory containing all trace event subsystems:\n"
3753 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3754 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3755 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3756 "\t\t\t events\n"
26f25564 3757 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3758 " events/<system>/<event>/\t- Directory containing control files for\n"
3759 "\t\t\t <event>:\n"
26f25564
TZ
3760 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3761 " filter\t\t- If set, only events passing filter are traced\n"
3762 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3763 "\t Format: <trigger>[:count][if <filter>]\n"
3764 "\t trigger: traceon, traceoff\n"
3765 "\t enable_event:<system>:<event>\n"
3766 "\t disable_event:<system>:<event>\n"
26f25564 3767#ifdef CONFIG_STACKTRACE
71485c45 3768 "\t\t stacktrace\n"
26f25564
TZ
3769#endif
3770#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3771 "\t\t snapshot\n"
26f25564 3772#endif
71485c45
SRRH
3773 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3774 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3775 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3776 "\t events/block/block_unplug/trigger\n"
3777 "\t The first disables tracing every time block_unplug is hit.\n"
3778 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3779 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3780 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3781 "\t Like function triggers, the counter is only decremented if it\n"
3782 "\t enabled or disabled tracing.\n"
3783 "\t To remove a trigger without a count:\n"
3784 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3785 "\t To remove a trigger with a count:\n"
3786 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3787 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3788;
3789
3790static ssize_t
3791tracing_readme_read(struct file *filp, char __user *ubuf,
3792 size_t cnt, loff_t *ppos)
3793{
3794 return simple_read_from_buffer(ubuf, cnt, ppos,
3795 readme_msg, strlen(readme_msg));
3796}
3797
5e2336a0 3798static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3799 .open = tracing_open_generic,
3800 .read = tracing_readme_read,
b444786f 3801 .llseek = generic_file_llseek,
7bd2f24c
IM
3802};
3803
42584c81
YY
3804static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3805{
3806 unsigned int *ptr = v;
69abe6a5 3807
42584c81
YY
3808 if (*pos || m->count)
3809 ptr++;
69abe6a5 3810
42584c81 3811 (*pos)++;
69abe6a5 3812
939c7a4f
YY
3813 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3814 ptr++) {
42584c81
YY
3815 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3816 continue;
69abe6a5 3817
42584c81
YY
3818 return ptr;
3819 }
69abe6a5 3820
42584c81
YY
3821 return NULL;
3822}
3823
3824static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3825{
3826 void *v;
3827 loff_t l = 0;
69abe6a5 3828
4c27e756
SRRH
3829 preempt_disable();
3830 arch_spin_lock(&trace_cmdline_lock);
3831
939c7a4f 3832 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3833 while (l <= *pos) {
3834 v = saved_cmdlines_next(m, v, &l);
3835 if (!v)
3836 return NULL;
69abe6a5
AP
3837 }
3838
42584c81
YY
3839 return v;
3840}
3841
3842static void saved_cmdlines_stop(struct seq_file *m, void *v)
3843{
4c27e756
SRRH
3844 arch_spin_unlock(&trace_cmdline_lock);
3845 preempt_enable();
42584c81 3846}
69abe6a5 3847
42584c81
YY
3848static int saved_cmdlines_show(struct seq_file *m, void *v)
3849{
3850 char buf[TASK_COMM_LEN];
3851 unsigned int *pid = v;
69abe6a5 3852
4c27e756 3853 __trace_find_cmdline(*pid, buf);
42584c81
YY
3854 seq_printf(m, "%d %s\n", *pid, buf);
3855 return 0;
3856}
3857
3858static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3859 .start = saved_cmdlines_start,
3860 .next = saved_cmdlines_next,
3861 .stop = saved_cmdlines_stop,
3862 .show = saved_cmdlines_show,
3863};
3864
3865static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3866{
3867 if (tracing_disabled)
3868 return -ENODEV;
3869
3870 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3871}
3872
3873static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3874 .open = tracing_saved_cmdlines_open,
3875 .read = seq_read,
3876 .llseek = seq_lseek,
3877 .release = seq_release,
69abe6a5
AP
3878};
3879
939c7a4f
YY
3880static ssize_t
3881tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3883{
3884 char buf[64];
3885 int r;
3886
3887 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3888 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3889 arch_spin_unlock(&trace_cmdline_lock);
3890
3891 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3892}
3893
3894static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3895{
3896 kfree(s->saved_cmdlines);
3897 kfree(s->map_cmdline_to_pid);
3898 kfree(s);
3899}
3900
3901static int tracing_resize_saved_cmdlines(unsigned int val)
3902{
3903 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3904
a6af8fbf 3905 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3906 if (!s)
3907 return -ENOMEM;
3908
3909 if (allocate_cmdlines_buffer(val, s) < 0) {
3910 kfree(s);
3911 return -ENOMEM;
3912 }
3913
3914 arch_spin_lock(&trace_cmdline_lock);
3915 savedcmd_temp = savedcmd;
3916 savedcmd = s;
3917 arch_spin_unlock(&trace_cmdline_lock);
3918 free_saved_cmdlines_buffer(savedcmd_temp);
3919
3920 return 0;
3921}
3922
3923static ssize_t
3924tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3925 size_t cnt, loff_t *ppos)
3926{
3927 unsigned long val;
3928 int ret;
3929
3930 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3931 if (ret)
3932 return ret;
3933
3934 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3935 if (!val || val > PID_MAX_DEFAULT)
3936 return -EINVAL;
3937
3938 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3939 if (ret < 0)
3940 return ret;
3941
3942 *ppos += cnt;
3943
3944 return cnt;
3945}
3946
3947static const struct file_operations tracing_saved_cmdlines_size_fops = {
3948 .open = tracing_open_generic,
3949 .read = tracing_saved_cmdlines_size_read,
3950 .write = tracing_saved_cmdlines_size_write,
3951};
3952
9828413d
SRRH
3953#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3954static union trace_enum_map_item *
3955update_enum_map(union trace_enum_map_item *ptr)
3956{
3957 if (!ptr->map.enum_string) {
3958 if (ptr->tail.next) {
3959 ptr = ptr->tail.next;
3960 /* Set ptr to the next real item (skip head) */
3961 ptr++;
3962 } else
3963 return NULL;
3964 }
3965 return ptr;
3966}
3967
3968static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3969{
3970 union trace_enum_map_item *ptr = v;
3971
3972 /*
3973 * Paranoid! If ptr points to end, we don't want to increment past it.
3974 * This really should never happen.
3975 */
3976 ptr = update_enum_map(ptr);
3977 if (WARN_ON_ONCE(!ptr))
3978 return NULL;
3979
3980 ptr++;
3981
3982 (*pos)++;
3983
3984 ptr = update_enum_map(ptr);
3985
3986 return ptr;
3987}
3988
3989static void *enum_map_start(struct seq_file *m, loff_t *pos)
3990{
3991 union trace_enum_map_item *v;
3992 loff_t l = 0;
3993
3994 mutex_lock(&trace_enum_mutex);
3995
3996 v = trace_enum_maps;
3997 if (v)
3998 v++;
3999
4000 while (v && l < *pos) {
4001 v = enum_map_next(m, v, &l);
4002 }
4003
4004 return v;
4005}
4006
4007static void enum_map_stop(struct seq_file *m, void *v)
4008{
4009 mutex_unlock(&trace_enum_mutex);
4010}
4011
4012static int enum_map_show(struct seq_file *m, void *v)
4013{
4014 union trace_enum_map_item *ptr = v;
4015
4016 seq_printf(m, "%s %ld (%s)\n",
4017 ptr->map.enum_string, ptr->map.enum_value,
4018 ptr->map.system);
4019
4020 return 0;
4021}
4022
4023static const struct seq_operations tracing_enum_map_seq_ops = {
4024 .start = enum_map_start,
4025 .next = enum_map_next,
4026 .stop = enum_map_stop,
4027 .show = enum_map_show,
4028};
4029
4030static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4031{
4032 if (tracing_disabled)
4033 return -ENODEV;
4034
4035 return seq_open(filp, &tracing_enum_map_seq_ops);
4036}
4037
4038static const struct file_operations tracing_enum_map_fops = {
4039 .open = tracing_enum_map_open,
4040 .read = seq_read,
4041 .llseek = seq_lseek,
4042 .release = seq_release,
4043};
4044
4045static inline union trace_enum_map_item *
4046trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4047{
4048 /* Return tail of array given the head */
4049 return ptr + ptr->head.length + 1;
4050}
4051
4052static void
4053trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4054 int len)
4055{
4056 struct trace_enum_map **stop;
4057 struct trace_enum_map **map;
4058 union trace_enum_map_item *map_array;
4059 union trace_enum_map_item *ptr;
4060
4061 stop = start + len;
4062
4063 /*
4064 * The trace_enum_maps contains the map plus a head and tail item,
4065 * where the head holds the module and length of array, and the
4066 * tail holds a pointer to the next list.
4067 */
4068 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4069 if (!map_array) {
4070 pr_warning("Unable to allocate trace enum mapping\n");
4071 return;
4072 }
4073
4074 mutex_lock(&trace_enum_mutex);
4075
4076 if (!trace_enum_maps)
4077 trace_enum_maps = map_array;
4078 else {
4079 ptr = trace_enum_maps;
4080 for (;;) {
4081 ptr = trace_enum_jmp_to_tail(ptr);
4082 if (!ptr->tail.next)
4083 break;
4084 ptr = ptr->tail.next;
4085
4086 }
4087 ptr->tail.next = map_array;
4088 }
4089 map_array->head.mod = mod;
4090 map_array->head.length = len;
4091 map_array++;
4092
4093 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4094 map_array->map = **map;
4095 map_array++;
4096 }
4097 memset(map_array, 0, sizeof(*map_array));
4098
4099 mutex_unlock(&trace_enum_mutex);
4100}
4101
4102static void trace_create_enum_file(struct dentry *d_tracer)
4103{
4104 trace_create_file("enum_map", 0444, d_tracer,
4105 NULL, &tracing_enum_map_fops);
4106}
4107
4108#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4109static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4110static inline void trace_insert_enum_map_file(struct module *mod,
4111 struct trace_enum_map **start, int len) { }
4112#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4113
4114static void trace_insert_enum_map(struct module *mod,
4115 struct trace_enum_map **start, int len)
0c564a53
SRRH
4116{
4117 struct trace_enum_map **map;
0c564a53
SRRH
4118
4119 if (len <= 0)
4120 return;
4121
4122 map = start;
4123
4124 trace_event_enum_update(map, len);
9828413d
SRRH
4125
4126 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4127}
4128
bc0c38d1
SR
4129static ssize_t
4130tracing_set_trace_read(struct file *filp, char __user *ubuf,
4131 size_t cnt, loff_t *ppos)
4132{
2b6080f2 4133 struct trace_array *tr = filp->private_data;
ee6c2c1b 4134 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4135 int r;
4136
4137 mutex_lock(&trace_types_lock);
2b6080f2 4138 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4139 mutex_unlock(&trace_types_lock);
4140
4bf39a94 4141 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4142}
4143
b6f11df2
ACM
4144int tracer_init(struct tracer *t, struct trace_array *tr)
4145{
12883efb 4146 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4147 return t->init(tr);
4148}
4149
12883efb 4150static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4151{
4152 int cpu;
737223fb 4153
438ced17 4154 for_each_tracing_cpu(cpu)
12883efb 4155 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4156}
4157
12883efb 4158#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4159/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4160static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4161 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4162{
4163 int cpu, ret = 0;
4164
4165 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4166 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4167 ret = ring_buffer_resize(trace_buf->buffer,
4168 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4169 if (ret < 0)
4170 break;
12883efb
SRRH
4171 per_cpu_ptr(trace_buf->data, cpu)->entries =
4172 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4173 }
4174 } else {
12883efb
SRRH
4175 ret = ring_buffer_resize(trace_buf->buffer,
4176 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4177 if (ret == 0)
12883efb
SRRH
4178 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4179 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4180 }
4181
4182 return ret;
4183}
12883efb 4184#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4185
2b6080f2
SR
4186static int __tracing_resize_ring_buffer(struct trace_array *tr,
4187 unsigned long size, int cpu)
73c5162a
SR
4188{
4189 int ret;
4190
4191 /*
4192 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4193 * we use the size that was given, and we can forget about
4194 * expanding it later.
73c5162a 4195 */
55034cd6 4196 ring_buffer_expanded = true;
73c5162a 4197
b382ede6 4198 /* May be called before buffers are initialized */
12883efb 4199 if (!tr->trace_buffer.buffer)
b382ede6
SR
4200 return 0;
4201
12883efb 4202 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4203 if (ret < 0)
4204 return ret;
4205
12883efb 4206#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4207 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4208 !tr->current_trace->use_max_tr)
ef710e10
KM
4209 goto out;
4210
12883efb 4211 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4212 if (ret < 0) {
12883efb
SRRH
4213 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4214 &tr->trace_buffer, cpu);
73c5162a 4215 if (r < 0) {
a123c52b
SR
4216 /*
4217 * AARGH! We are left with different
4218 * size max buffer!!!!
4219 * The max buffer is our "snapshot" buffer.
4220 * When a tracer needs a snapshot (one of the
4221 * latency tracers), it swaps the max buffer
4222 * with the saved snap shot. We succeeded to
4223 * update the size of the main buffer, but failed to
4224 * update the size of the max buffer. But when we tried
4225 * to reset the main buffer to the original size, we
4226 * failed there too. This is very unlikely to
4227 * happen, but if it does, warn and kill all
4228 * tracing.
4229 */
73c5162a
SR
4230 WARN_ON(1);
4231 tracing_disabled = 1;
4232 }
4233 return ret;
4234 }
4235
438ced17 4236 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4237 set_buffer_entries(&tr->max_buffer, size);
438ced17 4238 else
12883efb 4239 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4240
ef710e10 4241 out:
12883efb
SRRH
4242#endif /* CONFIG_TRACER_MAX_TRACE */
4243
438ced17 4244 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4245 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4246 else
12883efb 4247 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4248
4249 return ret;
4250}
4251
2b6080f2
SR
4252static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4253 unsigned long size, int cpu_id)
4f271a2a 4254{
83f40318 4255 int ret = size;
4f271a2a
VN
4256
4257 mutex_lock(&trace_types_lock);
4258
438ced17
VN
4259 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4260 /* make sure, this cpu is enabled in the mask */
4261 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4262 ret = -EINVAL;
4263 goto out;
4264 }
4265 }
4f271a2a 4266
2b6080f2 4267 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4268 if (ret < 0)
4269 ret = -ENOMEM;
4270
438ced17 4271out:
4f271a2a
VN
4272 mutex_unlock(&trace_types_lock);
4273
4274 return ret;
4275}
4276
ef710e10 4277
1852fcce
SR
4278/**
4279 * tracing_update_buffers - used by tracing facility to expand ring buffers
4280 *
4281 * To save on memory when the tracing is never used on a system with it
4282 * configured in. The ring buffers are set to a minimum size. But once
4283 * a user starts to use the tracing facility, then they need to grow
4284 * to their default size.
4285 *
4286 * This function is to be called when a tracer is about to be used.
4287 */
4288int tracing_update_buffers(void)
4289{
4290 int ret = 0;
4291
1027fcb2 4292 mutex_lock(&trace_types_lock);
1852fcce 4293 if (!ring_buffer_expanded)
2b6080f2 4294 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4295 RING_BUFFER_ALL_CPUS);
1027fcb2 4296 mutex_unlock(&trace_types_lock);
1852fcce
SR
4297
4298 return ret;
4299}
4300
577b785f
SR
4301struct trace_option_dentry;
4302
4303static struct trace_option_dentry *
2b6080f2 4304create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4305
6b450d25
SRRH
4306/*
4307 * Used to clear out the tracer before deletion of an instance.
4308 * Must have trace_types_lock held.
4309 */
4310static void tracing_set_nop(struct trace_array *tr)
4311{
4312 if (tr->current_trace == &nop_trace)
4313 return;
4314
50512ab5 4315 tr->current_trace->enabled--;
6b450d25
SRRH
4316
4317 if (tr->current_trace->reset)
4318 tr->current_trace->reset(tr);
4319
4320 tr->current_trace = &nop_trace;
4321}
4322
41d9c0be 4323static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4324{
09d23a1d
SRRH
4325 /* Only enable if the directory has been created already. */
4326 if (!tr->dir)
4327 return;
4328
4329 /* Currently, only the top instance has options */
4330 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4331 return;
4332
41d9c0be
SRRH
4333 /* Ignore if they were already created */
4334 if (t->topts)
4335 return;
4336
4337 t->topts = create_trace_option_files(tr, t);
09d23a1d
SRRH
4338}
4339
4340static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4341{
bc0c38d1 4342 struct tracer *t;
12883efb 4343#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4344 bool had_max_tr;
12883efb 4345#endif
d9e54076 4346 int ret = 0;
bc0c38d1 4347
1027fcb2
SR
4348 mutex_lock(&trace_types_lock);
4349
73c5162a 4350 if (!ring_buffer_expanded) {
2b6080f2 4351 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4352 RING_BUFFER_ALL_CPUS);
73c5162a 4353 if (ret < 0)
59f586db 4354 goto out;
73c5162a
SR
4355 ret = 0;
4356 }
4357
bc0c38d1
SR
4358 for (t = trace_types; t; t = t->next) {
4359 if (strcmp(t->name, buf) == 0)
4360 break;
4361 }
c2931e05
FW
4362 if (!t) {
4363 ret = -EINVAL;
4364 goto out;
4365 }
2b6080f2 4366 if (t == tr->current_trace)
bc0c38d1
SR
4367 goto out;
4368
607e2ea1
SRRH
4369 /* Some tracers are only allowed for the top level buffer */
4370 if (!trace_ok_for_array(t, tr)) {
4371 ret = -EINVAL;
4372 goto out;
4373 }
4374
cf6ab6d9
SRRH
4375 /* If trace pipe files are being read, we can't change the tracer */
4376 if (tr->current_trace->ref) {
4377 ret = -EBUSY;
4378 goto out;
4379 }
4380
9f029e83 4381 trace_branch_disable();
613f04a0 4382
50512ab5 4383 tr->current_trace->enabled--;
613f04a0 4384
2b6080f2
SR
4385 if (tr->current_trace->reset)
4386 tr->current_trace->reset(tr);
34600f0e 4387
12883efb 4388 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4389 tr->current_trace = &nop_trace;
34600f0e 4390
45ad21ca
SRRH
4391#ifdef CONFIG_TRACER_MAX_TRACE
4392 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4393
4394 if (had_max_tr && !t->use_max_tr) {
4395 /*
4396 * We need to make sure that the update_max_tr sees that
4397 * current_trace changed to nop_trace to keep it from
4398 * swapping the buffers after we resize it.
4399 * The update_max_tr is called from interrupts disabled
4400 * so a synchronized_sched() is sufficient.
4401 */
4402 synchronize_sched();
3209cff4 4403 free_snapshot(tr);
ef710e10 4404 }
12883efb 4405#endif
12883efb
SRRH
4406
4407#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4408 if (t->use_max_tr && !had_max_tr) {
3209cff4 4409 ret = alloc_snapshot(tr);
d60da506
HT
4410 if (ret < 0)
4411 goto out;
ef710e10 4412 }
12883efb 4413#endif
577b785f 4414
1c80025a 4415 if (t->init) {
b6f11df2 4416 ret = tracer_init(t, tr);
1c80025a
FW
4417 if (ret)
4418 goto out;
4419 }
bc0c38d1 4420
2b6080f2 4421 tr->current_trace = t;
50512ab5 4422 tr->current_trace->enabled++;
9f029e83 4423 trace_branch_enable(tr);
bc0c38d1
SR
4424 out:
4425 mutex_unlock(&trace_types_lock);
4426
d9e54076
PZ
4427 return ret;
4428}
4429
4430static ssize_t
4431tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4432 size_t cnt, loff_t *ppos)
4433{
607e2ea1 4434 struct trace_array *tr = filp->private_data;
ee6c2c1b 4435 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4436 int i;
4437 size_t ret;
e6e7a65a
FW
4438 int err;
4439
4440 ret = cnt;
d9e54076 4441
ee6c2c1b
LZ
4442 if (cnt > MAX_TRACER_SIZE)
4443 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4444
4445 if (copy_from_user(&buf, ubuf, cnt))
4446 return -EFAULT;
4447
4448 buf[cnt] = 0;
4449
4450 /* strip ending whitespace. */
4451 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4452 buf[i] = 0;
4453
607e2ea1 4454 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4455 if (err)
4456 return err;
d9e54076 4457
cf8517cf 4458 *ppos += ret;
bc0c38d1 4459
c2931e05 4460 return ret;
bc0c38d1
SR
4461}
4462
4463static ssize_t
6508fa76
SF
4464tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
bc0c38d1 4466{
bc0c38d1
SR
4467 char buf[64];
4468 int r;
4469
cffae437 4470 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4471 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4472 if (r > sizeof(buf))
4473 r = sizeof(buf);
4bf39a94 4474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4475}
4476
4477static ssize_t
6508fa76
SF
4478tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4479 size_t cnt, loff_t *ppos)
bc0c38d1 4480{
5e39841c 4481 unsigned long val;
c6caeeb1 4482 int ret;
bc0c38d1 4483
22fe9b54
PH
4484 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4485 if (ret)
c6caeeb1 4486 return ret;
bc0c38d1
SR
4487
4488 *ptr = val * 1000;
4489
4490 return cnt;
4491}
4492
6508fa76
SF
4493static ssize_t
4494tracing_thresh_read(struct file *filp, char __user *ubuf,
4495 size_t cnt, loff_t *ppos)
4496{
4497 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4498}
4499
4500static ssize_t
4501tracing_thresh_write(struct file *filp, const char __user *ubuf,
4502 size_t cnt, loff_t *ppos)
4503{
4504 struct trace_array *tr = filp->private_data;
4505 int ret;
4506
4507 mutex_lock(&trace_types_lock);
4508 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4509 if (ret < 0)
4510 goto out;
4511
4512 if (tr->current_trace->update_thresh) {
4513 ret = tr->current_trace->update_thresh(tr);
4514 if (ret < 0)
4515 goto out;
4516 }
4517
4518 ret = cnt;
4519out:
4520 mutex_unlock(&trace_types_lock);
4521
4522 return ret;
4523}
4524
4525static ssize_t
4526tracing_max_lat_read(struct file *filp, char __user *ubuf,
4527 size_t cnt, loff_t *ppos)
4528{
4529 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4530}
4531
4532static ssize_t
4533tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4534 size_t cnt, loff_t *ppos)
4535{
4536 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4537}
4538
b3806b43
SR
4539static int tracing_open_pipe(struct inode *inode, struct file *filp)
4540{
15544209 4541 struct trace_array *tr = inode->i_private;
b3806b43 4542 struct trace_iterator *iter;
b04cc6b1 4543 int ret = 0;
b3806b43
SR
4544
4545 if (tracing_disabled)
4546 return -ENODEV;
4547
7b85af63
SRRH
4548 if (trace_array_get(tr) < 0)
4549 return -ENODEV;
4550
b04cc6b1
FW
4551 mutex_lock(&trace_types_lock);
4552
b3806b43
SR
4553 /* create a buffer to store the information to pass to userspace */
4554 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4555 if (!iter) {
4556 ret = -ENOMEM;
f77d09a3 4557 __trace_array_put(tr);
b04cc6b1
FW
4558 goto out;
4559 }
b3806b43 4560
3a161d99 4561 trace_seq_init(&iter->seq);
d716ff71 4562 iter->trace = tr->current_trace;
d7350c3f 4563
4462344e 4564 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4565 ret = -ENOMEM;
d7350c3f 4566 goto fail;
4462344e
RR
4567 }
4568
a309720c 4569 /* trace pipe does not show start of buffer */
4462344e 4570 cpumask_setall(iter->started);
a309720c 4571
983f938a 4572 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
4573 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4574
8be0709f 4575 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4576 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4577 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4578
15544209
ON
4579 iter->tr = tr;
4580 iter->trace_buffer = &tr->trace_buffer;
4581 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4582 mutex_init(&iter->mutex);
b3806b43
SR
4583 filp->private_data = iter;
4584
107bad8b
SR
4585 if (iter->trace->pipe_open)
4586 iter->trace->pipe_open(iter);
107bad8b 4587
b444786f 4588 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4589
4590 tr->current_trace->ref++;
b04cc6b1
FW
4591out:
4592 mutex_unlock(&trace_types_lock);
4593 return ret;
d7350c3f
FW
4594
4595fail:
4596 kfree(iter->trace);
4597 kfree(iter);
7b85af63 4598 __trace_array_put(tr);
d7350c3f
FW
4599 mutex_unlock(&trace_types_lock);
4600 return ret;
b3806b43
SR
4601}
4602
4603static int tracing_release_pipe(struct inode *inode, struct file *file)
4604{
4605 struct trace_iterator *iter = file->private_data;
15544209 4606 struct trace_array *tr = inode->i_private;
b3806b43 4607
b04cc6b1
FW
4608 mutex_lock(&trace_types_lock);
4609
cf6ab6d9
SRRH
4610 tr->current_trace->ref--;
4611
29bf4a5e 4612 if (iter->trace->pipe_close)
c521efd1
SR
4613 iter->trace->pipe_close(iter);
4614
b04cc6b1
FW
4615 mutex_unlock(&trace_types_lock);
4616
4462344e 4617 free_cpumask_var(iter->started);
d7350c3f 4618 mutex_destroy(&iter->mutex);
b3806b43 4619 kfree(iter);
b3806b43 4620
7b85af63
SRRH
4621 trace_array_put(tr);
4622
b3806b43
SR
4623 return 0;
4624}
4625
2a2cc8f7 4626static unsigned int
cc60cdc9 4627trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4628{
983f938a
SRRH
4629 struct trace_array *tr = iter->tr;
4630
15693458
SRRH
4631 /* Iterators are static, they should be filled or empty */
4632 if (trace_buffer_iter(iter, iter->cpu_file))
4633 return POLLIN | POLLRDNORM;
2a2cc8f7 4634
983f938a 4635 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4636 /*
4637 * Always select as readable when in blocking mode
4638 */
4639 return POLLIN | POLLRDNORM;
15693458 4640 else
12883efb 4641 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4642 filp, poll_table);
2a2cc8f7 4643}
2a2cc8f7 4644
cc60cdc9
SR
4645static unsigned int
4646tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4647{
4648 struct trace_iterator *iter = filp->private_data;
4649
4650 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4651}
4652
d716ff71 4653/* Must be called with iter->mutex held. */
ff98781b 4654static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4655{
4656 struct trace_iterator *iter = filp->private_data;
8b8b3683 4657 int ret;
b3806b43 4658
b3806b43 4659 while (trace_empty(iter)) {
2dc8f095 4660
107bad8b 4661 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4662 return -EAGAIN;
107bad8b 4663 }
2dc8f095 4664
b3806b43 4665 /*
250bfd3d 4666 * We block until we read something and tracing is disabled.
b3806b43
SR
4667 * We still block if tracing is disabled, but we have never
4668 * read anything. This allows a user to cat this file, and
4669 * then enable tracing. But after we have read something,
4670 * we give an EOF when tracing is again disabled.
4671 *
4672 * iter->pos will be 0 if we haven't read anything.
4673 */
10246fa3 4674 if (!tracing_is_on() && iter->pos)
b3806b43 4675 break;
f4874261
SRRH
4676
4677 mutex_unlock(&iter->mutex);
4678
e30f53aa 4679 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4680
4681 mutex_lock(&iter->mutex);
4682
8b8b3683
SRRH
4683 if (ret)
4684 return ret;
b3806b43
SR
4685 }
4686
ff98781b
EGM
4687 return 1;
4688}
4689
4690/*
4691 * Consumer reader.
4692 */
4693static ssize_t
4694tracing_read_pipe(struct file *filp, char __user *ubuf,
4695 size_t cnt, loff_t *ppos)
4696{
4697 struct trace_iterator *iter = filp->private_data;
4698 ssize_t sret;
4699
4700 /* return any leftover data */
4701 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4702 if (sret != -EBUSY)
4703 return sret;
4704
f9520750 4705 trace_seq_init(&iter->seq);
ff98781b 4706
d7350c3f
FW
4707 /*
4708 * Avoid more than one consumer on a single file descriptor
4709 * This is just a matter of traces coherency, the ring buffer itself
4710 * is protected.
4711 */
4712 mutex_lock(&iter->mutex);
ff98781b
EGM
4713 if (iter->trace->read) {
4714 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4715 if (sret)
4716 goto out;
4717 }
4718
4719waitagain:
4720 sret = tracing_wait_pipe(filp);
4721 if (sret <= 0)
4722 goto out;
4723
b3806b43 4724 /* stop when tracing is finished */
ff98781b
EGM
4725 if (trace_empty(iter)) {
4726 sret = 0;
107bad8b 4727 goto out;
ff98781b 4728 }
b3806b43
SR
4729
4730 if (cnt >= PAGE_SIZE)
4731 cnt = PAGE_SIZE - 1;
4732
53d0aa77 4733 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4734 memset(&iter->seq, 0,
4735 sizeof(struct trace_iterator) -
4736 offsetof(struct trace_iterator, seq));
ed5467da 4737 cpumask_clear(iter->started);
4823ed7e 4738 iter->pos = -1;
b3806b43 4739
4f535968 4740 trace_event_read_lock();
7e53bd42 4741 trace_access_lock(iter->cpu_file);
955b61e5 4742 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4743 enum print_line_t ret;
5ac48378 4744 int save_len = iter->seq.seq.len;
088b1e42 4745
f9896bf3 4746 ret = print_trace_line(iter);
2c4f035f 4747 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4748 /* don't print partial lines */
5ac48378 4749 iter->seq.seq.len = save_len;
b3806b43 4750 break;
088b1e42 4751 }
b91facc3
FW
4752 if (ret != TRACE_TYPE_NO_CONSUME)
4753 trace_consume(iter);
b3806b43 4754
5ac48378 4755 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4756 break;
ee5e51f5
JO
4757
4758 /*
4759 * Setting the full flag means we reached the trace_seq buffer
4760 * size and we should leave by partial output condition above.
4761 * One of the trace_seq_* functions is not used properly.
4762 */
4763 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4764 iter->ent->type);
b3806b43 4765 }
7e53bd42 4766 trace_access_unlock(iter->cpu_file);
4f535968 4767 trace_event_read_unlock();
b3806b43 4768
b3806b43 4769 /* Now copy what we have to the user */
6c6c2796 4770 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4771 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4772 trace_seq_init(&iter->seq);
9ff4b974
PP
4773
4774 /*
25985edc 4775 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4776 * entries, go back to wait for more entries.
4777 */
6c6c2796 4778 if (sret == -EBUSY)
9ff4b974 4779 goto waitagain;
b3806b43 4780
107bad8b 4781out:
d7350c3f 4782 mutex_unlock(&iter->mutex);
107bad8b 4783
6c6c2796 4784 return sret;
b3806b43
SR
4785}
4786
3c56819b
EGM
4787static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4788 unsigned int idx)
4789{
4790 __free_page(spd->pages[idx]);
4791}
4792
28dfef8f 4793static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4794 .can_merge = 0,
34cd4998 4795 .confirm = generic_pipe_buf_confirm,
92fdd98c 4796 .release = generic_pipe_buf_release,
34cd4998
SR
4797 .steal = generic_pipe_buf_steal,
4798 .get = generic_pipe_buf_get,
3c56819b
EGM
4799};
4800
34cd4998 4801static size_t
fa7c7f6e 4802tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4803{
4804 size_t count;
74f06bb7 4805 int save_len;
34cd4998
SR
4806 int ret;
4807
4808 /* Seq buffer is page-sized, exactly what we need. */
4809 for (;;) {
74f06bb7 4810 save_len = iter->seq.seq.len;
34cd4998 4811 ret = print_trace_line(iter);
74f06bb7
SRRH
4812
4813 if (trace_seq_has_overflowed(&iter->seq)) {
4814 iter->seq.seq.len = save_len;
34cd4998
SR
4815 break;
4816 }
74f06bb7
SRRH
4817
4818 /*
4819 * This should not be hit, because it should only
4820 * be set if the iter->seq overflowed. But check it
4821 * anyway to be safe.
4822 */
34cd4998 4823 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4824 iter->seq.seq.len = save_len;
4825 break;
4826 }
4827
5ac48378 4828 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4829 if (rem < count) {
4830 rem = 0;
4831 iter->seq.seq.len = save_len;
34cd4998
SR
4832 break;
4833 }
4834
74e7ff8c
LJ
4835 if (ret != TRACE_TYPE_NO_CONSUME)
4836 trace_consume(iter);
34cd4998 4837 rem -= count;
955b61e5 4838 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4839 rem = 0;
4840 iter->ent = NULL;
4841 break;
4842 }
4843 }
4844
4845 return rem;
4846}
4847
3c56819b
EGM
4848static ssize_t tracing_splice_read_pipe(struct file *filp,
4849 loff_t *ppos,
4850 struct pipe_inode_info *pipe,
4851 size_t len,
4852 unsigned int flags)
4853{
35f3d14d
JA
4854 struct page *pages_def[PIPE_DEF_BUFFERS];
4855 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4856 struct trace_iterator *iter = filp->private_data;
4857 struct splice_pipe_desc spd = {
35f3d14d
JA
4858 .pages = pages_def,
4859 .partial = partial_def,
34cd4998 4860 .nr_pages = 0, /* This gets updated below. */
047fe360 4861 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4862 .flags = flags,
4863 .ops = &tracing_pipe_buf_ops,
4864 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4865 };
4866 ssize_t ret;
34cd4998 4867 size_t rem;
3c56819b
EGM
4868 unsigned int i;
4869
35f3d14d
JA
4870 if (splice_grow_spd(pipe, &spd))
4871 return -ENOMEM;
4872
d7350c3f 4873 mutex_lock(&iter->mutex);
3c56819b
EGM
4874
4875 if (iter->trace->splice_read) {
4876 ret = iter->trace->splice_read(iter, filp,
4877 ppos, pipe, len, flags);
4878 if (ret)
34cd4998 4879 goto out_err;
3c56819b
EGM
4880 }
4881
4882 ret = tracing_wait_pipe(filp);
4883 if (ret <= 0)
34cd4998 4884 goto out_err;
3c56819b 4885
955b61e5 4886 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4887 ret = -EFAULT;
34cd4998 4888 goto out_err;
3c56819b
EGM
4889 }
4890
4f535968 4891 trace_event_read_lock();
7e53bd42 4892 trace_access_lock(iter->cpu_file);
4f535968 4893
3c56819b 4894 /* Fill as many pages as possible. */
a786c06d 4895 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4896 spd.pages[i] = alloc_page(GFP_KERNEL);
4897 if (!spd.pages[i])
34cd4998 4898 break;
3c56819b 4899
fa7c7f6e 4900 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4901
4902 /* Copy the data into the page, so we can start over. */
4903 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4904 page_address(spd.pages[i]),
5ac48378 4905 trace_seq_used(&iter->seq));
3c56819b 4906 if (ret < 0) {
35f3d14d 4907 __free_page(spd.pages[i]);
3c56819b
EGM
4908 break;
4909 }
35f3d14d 4910 spd.partial[i].offset = 0;
5ac48378 4911 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4912
f9520750 4913 trace_seq_init(&iter->seq);
3c56819b
EGM
4914 }
4915
7e53bd42 4916 trace_access_unlock(iter->cpu_file);
4f535968 4917 trace_event_read_unlock();
d7350c3f 4918 mutex_unlock(&iter->mutex);
3c56819b
EGM
4919
4920 spd.nr_pages = i;
4921
35f3d14d
JA
4922 ret = splice_to_pipe(pipe, &spd);
4923out:
047fe360 4924 splice_shrink_spd(&spd);
35f3d14d 4925 return ret;
3c56819b 4926
34cd4998 4927out_err:
d7350c3f 4928 mutex_unlock(&iter->mutex);
35f3d14d 4929 goto out;
3c56819b
EGM
4930}
4931
a98a3c3f
SR
4932static ssize_t
4933tracing_entries_read(struct file *filp, char __user *ubuf,
4934 size_t cnt, loff_t *ppos)
4935{
0bc392ee
ON
4936 struct inode *inode = file_inode(filp);
4937 struct trace_array *tr = inode->i_private;
4938 int cpu = tracing_get_cpu(inode);
438ced17
VN
4939 char buf[64];
4940 int r = 0;
4941 ssize_t ret;
a98a3c3f 4942
db526ca3 4943 mutex_lock(&trace_types_lock);
438ced17 4944
0bc392ee 4945 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4946 int cpu, buf_size_same;
4947 unsigned long size;
4948
4949 size = 0;
4950 buf_size_same = 1;
4951 /* check if all cpu sizes are same */
4952 for_each_tracing_cpu(cpu) {
4953 /* fill in the size from first enabled cpu */
4954 if (size == 0)
12883efb
SRRH
4955 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4956 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4957 buf_size_same = 0;
4958 break;
4959 }
4960 }
4961
4962 if (buf_size_same) {
4963 if (!ring_buffer_expanded)
4964 r = sprintf(buf, "%lu (expanded: %lu)\n",
4965 size >> 10,
4966 trace_buf_size >> 10);
4967 else
4968 r = sprintf(buf, "%lu\n", size >> 10);
4969 } else
4970 r = sprintf(buf, "X\n");
4971 } else
0bc392ee 4972 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4973
db526ca3
SR
4974 mutex_unlock(&trace_types_lock);
4975
438ced17
VN
4976 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4977 return ret;
a98a3c3f
SR
4978}
4979
4980static ssize_t
4981tracing_entries_write(struct file *filp, const char __user *ubuf,
4982 size_t cnt, loff_t *ppos)
4983{
0bc392ee
ON
4984 struct inode *inode = file_inode(filp);
4985 struct trace_array *tr = inode->i_private;
a98a3c3f 4986 unsigned long val;
4f271a2a 4987 int ret;
a98a3c3f 4988
22fe9b54
PH
4989 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4990 if (ret)
c6caeeb1 4991 return ret;
a98a3c3f
SR
4992
4993 /* must have at least 1 entry */
4994 if (!val)
4995 return -EINVAL;
4996
1696b2b0
SR
4997 /* value is in KB */
4998 val <<= 10;
0bc392ee 4999 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5000 if (ret < 0)
5001 return ret;
a98a3c3f 5002
cf8517cf 5003 *ppos += cnt;
a98a3c3f 5004
4f271a2a
VN
5005 return cnt;
5006}
bf5e6519 5007
f81ab074
VN
5008static ssize_t
5009tracing_total_entries_read(struct file *filp, char __user *ubuf,
5010 size_t cnt, loff_t *ppos)
5011{
5012 struct trace_array *tr = filp->private_data;
5013 char buf[64];
5014 int r, cpu;
5015 unsigned long size = 0, expanded_size = 0;
5016
5017 mutex_lock(&trace_types_lock);
5018 for_each_tracing_cpu(cpu) {
12883efb 5019 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5020 if (!ring_buffer_expanded)
5021 expanded_size += trace_buf_size >> 10;
5022 }
5023 if (ring_buffer_expanded)
5024 r = sprintf(buf, "%lu\n", size);
5025 else
5026 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5027 mutex_unlock(&trace_types_lock);
5028
5029 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5030}
5031
4f271a2a
VN
5032static ssize_t
5033tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5034 size_t cnt, loff_t *ppos)
5035{
5036 /*
5037 * There is no need to read what the user has written, this function
5038 * is just to make sure that there is no error when "echo" is used
5039 */
5040
5041 *ppos += cnt;
a98a3c3f
SR
5042
5043 return cnt;
5044}
5045
4f271a2a
VN
5046static int
5047tracing_free_buffer_release(struct inode *inode, struct file *filp)
5048{
2b6080f2
SR
5049 struct trace_array *tr = inode->i_private;
5050
cf30cf67 5051 /* disable tracing ? */
983f938a 5052 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5053 tracer_tracing_off(tr);
4f271a2a 5054 /* resize the ring buffer to 0 */
2b6080f2 5055 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5056
7b85af63
SRRH
5057 trace_array_put(tr);
5058
4f271a2a
VN
5059 return 0;
5060}
5061
5bf9a1ee
PP
5062static ssize_t
5063tracing_mark_write(struct file *filp, const char __user *ubuf,
5064 size_t cnt, loff_t *fpos)
5065{
d696b58c 5066 unsigned long addr = (unsigned long)ubuf;
2d71619c 5067 struct trace_array *tr = filp->private_data;
d696b58c
SR
5068 struct ring_buffer_event *event;
5069 struct ring_buffer *buffer;
5070 struct print_entry *entry;
5071 unsigned long irq_flags;
5072 struct page *pages[2];
6edb2a8a 5073 void *map_page[2];
d696b58c
SR
5074 int nr_pages = 1;
5075 ssize_t written;
d696b58c
SR
5076 int offset;
5077 int size;
5078 int len;
5079 int ret;
6edb2a8a 5080 int i;
5bf9a1ee 5081
c76f0694 5082 if (tracing_disabled)
5bf9a1ee
PP
5083 return -EINVAL;
5084
983f938a 5085 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5086 return -EINVAL;
5087
5bf9a1ee
PP
5088 if (cnt > TRACE_BUF_SIZE)
5089 cnt = TRACE_BUF_SIZE;
5090
d696b58c
SR
5091 /*
5092 * Userspace is injecting traces into the kernel trace buffer.
5093 * We want to be as non intrusive as possible.
5094 * To do so, we do not want to allocate any special buffers
5095 * or take any locks, but instead write the userspace data
5096 * straight into the ring buffer.
5097 *
5098 * First we need to pin the userspace buffer into memory,
5099 * which, most likely it is, because it just referenced it.
5100 * But there's no guarantee that it is. By using get_user_pages_fast()
5101 * and kmap_atomic/kunmap_atomic() we can get access to the
5102 * pages directly. We then write the data directly into the
5103 * ring buffer.
5104 */
5105 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5106
d696b58c
SR
5107 /* check if we cross pages */
5108 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5109 nr_pages = 2;
5110
5111 offset = addr & (PAGE_SIZE - 1);
5112 addr &= PAGE_MASK;
5113
5114 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5115 if (ret < nr_pages) {
5116 while (--ret >= 0)
5117 put_page(pages[ret]);
5118 written = -EFAULT;
5119 goto out;
5bf9a1ee 5120 }
d696b58c 5121
6edb2a8a
SR
5122 for (i = 0; i < nr_pages; i++)
5123 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5124
5125 local_save_flags(irq_flags);
5126 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5127 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5128 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5129 irq_flags, preempt_count());
5130 if (!event) {
5131 /* Ring buffer disabled, return as if not open for write */
5132 written = -EBADF;
5133 goto out_unlock;
5bf9a1ee 5134 }
d696b58c
SR
5135
5136 entry = ring_buffer_event_data(event);
5137 entry->ip = _THIS_IP_;
5138
5139 if (nr_pages == 2) {
5140 len = PAGE_SIZE - offset;
6edb2a8a
SR
5141 memcpy(&entry->buf, map_page[0] + offset, len);
5142 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5143 } else
6edb2a8a 5144 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5145
d696b58c
SR
5146 if (entry->buf[cnt - 1] != '\n') {
5147 entry->buf[cnt] = '\n';
5148 entry->buf[cnt + 1] = '\0';
5149 } else
5150 entry->buf[cnt] = '\0';
5151
7ffbd48d 5152 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5153
d696b58c 5154 written = cnt;
5bf9a1ee 5155
d696b58c 5156 *fpos += written;
1aa54bca 5157
d696b58c 5158 out_unlock:
7215853e 5159 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5160 kunmap_atomic(map_page[i]);
5161 put_page(pages[i]);
5162 }
d696b58c 5163 out:
1aa54bca 5164 return written;
5bf9a1ee
PP
5165}
5166
13f16d20 5167static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5168{
2b6080f2 5169 struct trace_array *tr = m->private;
5079f326
Z
5170 int i;
5171
5172 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5173 seq_printf(m,
5079f326 5174 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5175 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5176 i == tr->clock_id ? "]" : "");
13f16d20 5177 seq_putc(m, '\n');
5079f326 5178
13f16d20 5179 return 0;
5079f326
Z
5180}
5181
e1e232ca 5182static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5183{
5079f326
Z
5184 int i;
5185
5079f326
Z
5186 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5187 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5188 break;
5189 }
5190 if (i == ARRAY_SIZE(trace_clocks))
5191 return -EINVAL;
5192
5079f326
Z
5193 mutex_lock(&trace_types_lock);
5194
2b6080f2
SR
5195 tr->clock_id = i;
5196
12883efb 5197 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5198
60303ed3
DS
5199 /*
5200 * New clock may not be consistent with the previous clock.
5201 * Reset the buffer so that it doesn't have incomparable timestamps.
5202 */
9457158b 5203 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5204
5205#ifdef CONFIG_TRACER_MAX_TRACE
5206 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5207 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5208 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5209#endif
60303ed3 5210
5079f326
Z
5211 mutex_unlock(&trace_types_lock);
5212
e1e232ca
SR
5213 return 0;
5214}
5215
5216static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5217 size_t cnt, loff_t *fpos)
5218{
5219 struct seq_file *m = filp->private_data;
5220 struct trace_array *tr = m->private;
5221 char buf[64];
5222 const char *clockstr;
5223 int ret;
5224
5225 if (cnt >= sizeof(buf))
5226 return -EINVAL;
5227
5228 if (copy_from_user(&buf, ubuf, cnt))
5229 return -EFAULT;
5230
5231 buf[cnt] = 0;
5232
5233 clockstr = strstrip(buf);
5234
5235 ret = tracing_set_clock(tr, clockstr);
5236 if (ret)
5237 return ret;
5238
5079f326
Z
5239 *fpos += cnt;
5240
5241 return cnt;
5242}
5243
13f16d20
LZ
5244static int tracing_clock_open(struct inode *inode, struct file *file)
5245{
7b85af63
SRRH
5246 struct trace_array *tr = inode->i_private;
5247 int ret;
5248
13f16d20
LZ
5249 if (tracing_disabled)
5250 return -ENODEV;
2b6080f2 5251
7b85af63
SRRH
5252 if (trace_array_get(tr))
5253 return -ENODEV;
5254
5255 ret = single_open(file, tracing_clock_show, inode->i_private);
5256 if (ret < 0)
5257 trace_array_put(tr);
5258
5259 return ret;
13f16d20
LZ
5260}
5261
6de58e62
SRRH
5262struct ftrace_buffer_info {
5263 struct trace_iterator iter;
5264 void *spare;
5265 unsigned int read;
5266};
5267
debdd57f
HT
5268#ifdef CONFIG_TRACER_SNAPSHOT
5269static int tracing_snapshot_open(struct inode *inode, struct file *file)
5270{
6484c71c 5271 struct trace_array *tr = inode->i_private;
debdd57f 5272 struct trace_iterator *iter;
2b6080f2 5273 struct seq_file *m;
debdd57f
HT
5274 int ret = 0;
5275
ff451961
SRRH
5276 if (trace_array_get(tr) < 0)
5277 return -ENODEV;
5278
debdd57f 5279 if (file->f_mode & FMODE_READ) {
6484c71c 5280 iter = __tracing_open(inode, file, true);
debdd57f
HT
5281 if (IS_ERR(iter))
5282 ret = PTR_ERR(iter);
2b6080f2
SR
5283 } else {
5284 /* Writes still need the seq_file to hold the private data */
f77d09a3 5285 ret = -ENOMEM;
2b6080f2
SR
5286 m = kzalloc(sizeof(*m), GFP_KERNEL);
5287 if (!m)
f77d09a3 5288 goto out;
2b6080f2
SR
5289 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5290 if (!iter) {
5291 kfree(m);
f77d09a3 5292 goto out;
2b6080f2 5293 }
f77d09a3
AL
5294 ret = 0;
5295
ff451961 5296 iter->tr = tr;
6484c71c
ON
5297 iter->trace_buffer = &tr->max_buffer;
5298 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5299 m->private = iter;
5300 file->private_data = m;
debdd57f 5301 }
f77d09a3 5302out:
ff451961
SRRH
5303 if (ret < 0)
5304 trace_array_put(tr);
5305
debdd57f
HT
5306 return ret;
5307}
5308
5309static ssize_t
5310tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5311 loff_t *ppos)
5312{
2b6080f2
SR
5313 struct seq_file *m = filp->private_data;
5314 struct trace_iterator *iter = m->private;
5315 struct trace_array *tr = iter->tr;
debdd57f
HT
5316 unsigned long val;
5317 int ret;
5318
5319 ret = tracing_update_buffers();
5320 if (ret < 0)
5321 return ret;
5322
5323 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5324 if (ret)
5325 return ret;
5326
5327 mutex_lock(&trace_types_lock);
5328
2b6080f2 5329 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5330 ret = -EBUSY;
5331 goto out;
5332 }
5333
5334 switch (val) {
5335 case 0:
f1affcaa
SRRH
5336 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5337 ret = -EINVAL;
5338 break;
debdd57f 5339 }
3209cff4
SRRH
5340 if (tr->allocated_snapshot)
5341 free_snapshot(tr);
debdd57f
HT
5342 break;
5343 case 1:
f1affcaa
SRRH
5344/* Only allow per-cpu swap if the ring buffer supports it */
5345#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5346 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5347 ret = -EINVAL;
5348 break;
5349 }
5350#endif
45ad21ca 5351 if (!tr->allocated_snapshot) {
3209cff4 5352 ret = alloc_snapshot(tr);
debdd57f
HT
5353 if (ret < 0)
5354 break;
debdd57f 5355 }
debdd57f
HT
5356 local_irq_disable();
5357 /* Now, we're going to swap */
f1affcaa 5358 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5359 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5360 else
ce9bae55 5361 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5362 local_irq_enable();
5363 break;
5364 default:
45ad21ca 5365 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5366 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5367 tracing_reset_online_cpus(&tr->max_buffer);
5368 else
5369 tracing_reset(&tr->max_buffer, iter->cpu_file);
5370 }
debdd57f
HT
5371 break;
5372 }
5373
5374 if (ret >= 0) {
5375 *ppos += cnt;
5376 ret = cnt;
5377 }
5378out:
5379 mutex_unlock(&trace_types_lock);
5380 return ret;
5381}
2b6080f2
SR
5382
5383static int tracing_snapshot_release(struct inode *inode, struct file *file)
5384{
5385 struct seq_file *m = file->private_data;
ff451961
SRRH
5386 int ret;
5387
5388 ret = tracing_release(inode, file);
2b6080f2
SR
5389
5390 if (file->f_mode & FMODE_READ)
ff451961 5391 return ret;
2b6080f2
SR
5392
5393 /* If write only, the seq_file is just a stub */
5394 if (m)
5395 kfree(m->private);
5396 kfree(m);
5397
5398 return 0;
5399}
5400
6de58e62
SRRH
5401static int tracing_buffers_open(struct inode *inode, struct file *filp);
5402static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5403 size_t count, loff_t *ppos);
5404static int tracing_buffers_release(struct inode *inode, struct file *file);
5405static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5406 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5407
5408static int snapshot_raw_open(struct inode *inode, struct file *filp)
5409{
5410 struct ftrace_buffer_info *info;
5411 int ret;
5412
5413 ret = tracing_buffers_open(inode, filp);
5414 if (ret < 0)
5415 return ret;
5416
5417 info = filp->private_data;
5418
5419 if (info->iter.trace->use_max_tr) {
5420 tracing_buffers_release(inode, filp);
5421 return -EBUSY;
5422 }
5423
5424 info->iter.snapshot = true;
5425 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5426
5427 return ret;
5428}
5429
debdd57f
HT
5430#endif /* CONFIG_TRACER_SNAPSHOT */
5431
5432
6508fa76
SF
5433static const struct file_operations tracing_thresh_fops = {
5434 .open = tracing_open_generic,
5435 .read = tracing_thresh_read,
5436 .write = tracing_thresh_write,
5437 .llseek = generic_file_llseek,
5438};
5439
5e2336a0 5440static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5441 .open = tracing_open_generic,
5442 .read = tracing_max_lat_read,
5443 .write = tracing_max_lat_write,
b444786f 5444 .llseek = generic_file_llseek,
bc0c38d1
SR
5445};
5446
5e2336a0 5447static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5448 .open = tracing_open_generic,
5449 .read = tracing_set_trace_read,
5450 .write = tracing_set_trace_write,
b444786f 5451 .llseek = generic_file_llseek,
bc0c38d1
SR
5452};
5453
5e2336a0 5454static const struct file_operations tracing_pipe_fops = {
4bf39a94 5455 .open = tracing_open_pipe,
2a2cc8f7 5456 .poll = tracing_poll_pipe,
4bf39a94 5457 .read = tracing_read_pipe,
3c56819b 5458 .splice_read = tracing_splice_read_pipe,
4bf39a94 5459 .release = tracing_release_pipe,
b444786f 5460 .llseek = no_llseek,
b3806b43
SR
5461};
5462
5e2336a0 5463static const struct file_operations tracing_entries_fops = {
0bc392ee 5464 .open = tracing_open_generic_tr,
a98a3c3f
SR
5465 .read = tracing_entries_read,
5466 .write = tracing_entries_write,
b444786f 5467 .llseek = generic_file_llseek,
0bc392ee 5468 .release = tracing_release_generic_tr,
a98a3c3f
SR
5469};
5470
f81ab074 5471static const struct file_operations tracing_total_entries_fops = {
7b85af63 5472 .open = tracing_open_generic_tr,
f81ab074
VN
5473 .read = tracing_total_entries_read,
5474 .llseek = generic_file_llseek,
7b85af63 5475 .release = tracing_release_generic_tr,
f81ab074
VN
5476};
5477
4f271a2a 5478static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5479 .open = tracing_open_generic_tr,
4f271a2a
VN
5480 .write = tracing_free_buffer_write,
5481 .release = tracing_free_buffer_release,
5482};
5483
5e2336a0 5484static const struct file_operations tracing_mark_fops = {
7b85af63 5485 .open = tracing_open_generic_tr,
5bf9a1ee 5486 .write = tracing_mark_write,
b444786f 5487 .llseek = generic_file_llseek,
7b85af63 5488 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5489};
5490
5079f326 5491static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5492 .open = tracing_clock_open,
5493 .read = seq_read,
5494 .llseek = seq_lseek,
7b85af63 5495 .release = tracing_single_release_tr,
5079f326
Z
5496 .write = tracing_clock_write,
5497};
5498
debdd57f
HT
5499#ifdef CONFIG_TRACER_SNAPSHOT
5500static const struct file_operations snapshot_fops = {
5501 .open = tracing_snapshot_open,
5502 .read = seq_read,
5503 .write = tracing_snapshot_write,
098c879e 5504 .llseek = tracing_lseek,
2b6080f2 5505 .release = tracing_snapshot_release,
debdd57f 5506};
debdd57f 5507
6de58e62
SRRH
5508static const struct file_operations snapshot_raw_fops = {
5509 .open = snapshot_raw_open,
5510 .read = tracing_buffers_read,
5511 .release = tracing_buffers_release,
5512 .splice_read = tracing_buffers_splice_read,
5513 .llseek = no_llseek,
2cadf913
SR
5514};
5515
6de58e62
SRRH
5516#endif /* CONFIG_TRACER_SNAPSHOT */
5517
2cadf913
SR
5518static int tracing_buffers_open(struct inode *inode, struct file *filp)
5519{
46ef2be0 5520 struct trace_array *tr = inode->i_private;
2cadf913 5521 struct ftrace_buffer_info *info;
7b85af63 5522 int ret;
2cadf913
SR
5523
5524 if (tracing_disabled)
5525 return -ENODEV;
5526
7b85af63
SRRH
5527 if (trace_array_get(tr) < 0)
5528 return -ENODEV;
5529
2cadf913 5530 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5531 if (!info) {
5532 trace_array_put(tr);
2cadf913 5533 return -ENOMEM;
7b85af63 5534 }
2cadf913 5535
a695cb58
SRRH
5536 mutex_lock(&trace_types_lock);
5537
cc60cdc9 5538 info->iter.tr = tr;
46ef2be0 5539 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5540 info->iter.trace = tr->current_trace;
12883efb 5541 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5542 info->spare = NULL;
2cadf913 5543 /* Force reading ring buffer for first read */
cc60cdc9 5544 info->read = (unsigned int)-1;
2cadf913
SR
5545
5546 filp->private_data = info;
5547
cf6ab6d9
SRRH
5548 tr->current_trace->ref++;
5549
a695cb58
SRRH
5550 mutex_unlock(&trace_types_lock);
5551
7b85af63
SRRH
5552 ret = nonseekable_open(inode, filp);
5553 if (ret < 0)
5554 trace_array_put(tr);
5555
5556 return ret;
2cadf913
SR
5557}
5558
cc60cdc9
SR
5559static unsigned int
5560tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5561{
5562 struct ftrace_buffer_info *info = filp->private_data;
5563 struct trace_iterator *iter = &info->iter;
5564
5565 return trace_poll(iter, filp, poll_table);
5566}
5567
2cadf913
SR
5568static ssize_t
5569tracing_buffers_read(struct file *filp, char __user *ubuf,
5570 size_t count, loff_t *ppos)
5571{
5572 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5573 struct trace_iterator *iter = &info->iter;
2cadf913 5574 ssize_t ret;
6de58e62 5575 ssize_t size;
2cadf913 5576
2dc5d12b
SR
5577 if (!count)
5578 return 0;
5579
6de58e62 5580#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5581 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5582 return -EBUSY;
6de58e62
SRRH
5583#endif
5584
ddd538f3 5585 if (!info->spare)
12883efb
SRRH
5586 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5587 iter->cpu_file);
ddd538f3 5588 if (!info->spare)
d716ff71 5589 return -ENOMEM;
ddd538f3 5590
2cadf913
SR
5591 /* Do we have previous read data to read? */
5592 if (info->read < PAGE_SIZE)
5593 goto read;
5594
b627344f 5595 again:
cc60cdc9 5596 trace_access_lock(iter->cpu_file);
12883efb 5597 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5598 &info->spare,
5599 count,
cc60cdc9
SR
5600 iter->cpu_file, 0);
5601 trace_access_unlock(iter->cpu_file);
2cadf913 5602
b627344f
SR
5603 if (ret < 0) {
5604 if (trace_empty(iter)) {
d716ff71
SRRH
5605 if ((filp->f_flags & O_NONBLOCK))
5606 return -EAGAIN;
5607
e30f53aa 5608 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5609 if (ret)
5610 return ret;
5611
b627344f
SR
5612 goto again;
5613 }
d716ff71 5614 return 0;
b627344f 5615 }
436fc280 5616
436fc280 5617 info->read = 0;
b627344f 5618 read:
2cadf913
SR
5619 size = PAGE_SIZE - info->read;
5620 if (size > count)
5621 size = count;
5622
5623 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5624 if (ret == size)
5625 return -EFAULT;
5626
2dc5d12b
SR
5627 size -= ret;
5628
2cadf913
SR
5629 *ppos += size;
5630 info->read += size;
5631
5632 return size;
5633}
5634
5635static int tracing_buffers_release(struct inode *inode, struct file *file)
5636{
5637 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5638 struct trace_iterator *iter = &info->iter;
2cadf913 5639
a695cb58
SRRH
5640 mutex_lock(&trace_types_lock);
5641
cf6ab6d9
SRRH
5642 iter->tr->current_trace->ref--;
5643
ff451961 5644 __trace_array_put(iter->tr);
2cadf913 5645
ddd538f3 5646 if (info->spare)
12883efb 5647 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5648 kfree(info);
5649
a695cb58
SRRH
5650 mutex_unlock(&trace_types_lock);
5651
2cadf913
SR
5652 return 0;
5653}
5654
5655struct buffer_ref {
5656 struct ring_buffer *buffer;
5657 void *page;
5658 int ref;
5659};
5660
5661static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5662 struct pipe_buffer *buf)
5663{
5664 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5665
5666 if (--ref->ref)
5667 return;
5668
5669 ring_buffer_free_read_page(ref->buffer, ref->page);
5670 kfree(ref);
5671 buf->private = 0;
5672}
5673
2cadf913
SR
5674static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5675 struct pipe_buffer *buf)
5676{
5677 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5678
5679 ref->ref++;
5680}
5681
5682/* Pipe buffer operations for a buffer. */
28dfef8f 5683static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5684 .can_merge = 0,
2cadf913
SR
5685 .confirm = generic_pipe_buf_confirm,
5686 .release = buffer_pipe_buf_release,
d55cb6cf 5687 .steal = generic_pipe_buf_steal,
2cadf913
SR
5688 .get = buffer_pipe_buf_get,
5689};
5690
5691/*
5692 * Callback from splice_to_pipe(), if we need to release some pages
5693 * at the end of the spd in case we error'ed out in filling the pipe.
5694 */
5695static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5696{
5697 struct buffer_ref *ref =
5698 (struct buffer_ref *)spd->partial[i].private;
5699
5700 if (--ref->ref)
5701 return;
5702
5703 ring_buffer_free_read_page(ref->buffer, ref->page);
5704 kfree(ref);
5705 spd->partial[i].private = 0;
5706}
5707
5708static ssize_t
5709tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5710 struct pipe_inode_info *pipe, size_t len,
5711 unsigned int flags)
5712{
5713 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5714 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5715 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5716 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5717 struct splice_pipe_desc spd = {
35f3d14d
JA
5718 .pages = pages_def,
5719 .partial = partial_def,
047fe360 5720 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5721 .flags = flags,
5722 .ops = &buffer_pipe_buf_ops,
5723 .spd_release = buffer_spd_release,
5724 };
5725 struct buffer_ref *ref;
93459c6c 5726 int entries, size, i;
07906da7 5727 ssize_t ret = 0;
2cadf913 5728
6de58e62 5729#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5730 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5731 return -EBUSY;
6de58e62
SRRH
5732#endif
5733
d716ff71
SRRH
5734 if (splice_grow_spd(pipe, &spd))
5735 return -ENOMEM;
35f3d14d 5736
d716ff71
SRRH
5737 if (*ppos & (PAGE_SIZE - 1))
5738 return -EINVAL;
93cfb3c9
LJ
5739
5740 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5741 if (len < PAGE_SIZE)
5742 return -EINVAL;
93cfb3c9
LJ
5743 len &= PAGE_MASK;
5744 }
5745
cc60cdc9
SR
5746 again:
5747 trace_access_lock(iter->cpu_file);
12883efb 5748 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5749
a786c06d 5750 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5751 struct page *page;
5752 int r;
5753
5754 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5755 if (!ref) {
5756 ret = -ENOMEM;
2cadf913 5757 break;
07906da7 5758 }
2cadf913 5759
7267fa68 5760 ref->ref = 1;
12883efb 5761 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5762 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5763 if (!ref->page) {
07906da7 5764 ret = -ENOMEM;
2cadf913
SR
5765 kfree(ref);
5766 break;
5767 }
5768
5769 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5770 len, iter->cpu_file, 1);
2cadf913 5771 if (r < 0) {
7ea59064 5772 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5773 kfree(ref);
5774 break;
5775 }
5776
5777 /*
5778 * zero out any left over data, this is going to
5779 * user land.
5780 */
5781 size = ring_buffer_page_len(ref->page);
5782 if (size < PAGE_SIZE)
5783 memset(ref->page + size, 0, PAGE_SIZE - size);
5784
5785 page = virt_to_page(ref->page);
5786
5787 spd.pages[i] = page;
5788 spd.partial[i].len = PAGE_SIZE;
5789 spd.partial[i].offset = 0;
5790 spd.partial[i].private = (unsigned long)ref;
5791 spd.nr_pages++;
93cfb3c9 5792 *ppos += PAGE_SIZE;
93459c6c 5793
12883efb 5794 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5795 }
5796
cc60cdc9 5797 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5798 spd.nr_pages = i;
5799
5800 /* did we read anything? */
5801 if (!spd.nr_pages) {
07906da7 5802 if (ret)
d716ff71
SRRH
5803 return ret;
5804
5805 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5806 return -EAGAIN;
07906da7 5807
e30f53aa 5808 ret = wait_on_pipe(iter, true);
8b8b3683 5809 if (ret)
d716ff71 5810 return ret;
e30f53aa 5811
cc60cdc9 5812 goto again;
2cadf913
SR
5813 }
5814
5815 ret = splice_to_pipe(pipe, &spd);
047fe360 5816 splice_shrink_spd(&spd);
6de58e62 5817
2cadf913
SR
5818 return ret;
5819}
5820
5821static const struct file_operations tracing_buffers_fops = {
5822 .open = tracing_buffers_open,
5823 .read = tracing_buffers_read,
cc60cdc9 5824 .poll = tracing_buffers_poll,
2cadf913
SR
5825 .release = tracing_buffers_release,
5826 .splice_read = tracing_buffers_splice_read,
5827 .llseek = no_llseek,
5828};
5829
c8d77183
SR
5830static ssize_t
5831tracing_stats_read(struct file *filp, char __user *ubuf,
5832 size_t count, loff_t *ppos)
5833{
4d3435b8
ON
5834 struct inode *inode = file_inode(filp);
5835 struct trace_array *tr = inode->i_private;
12883efb 5836 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5837 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5838 struct trace_seq *s;
5839 unsigned long cnt;
c64e148a
VN
5840 unsigned long long t;
5841 unsigned long usec_rem;
c8d77183 5842
e4f2d10f 5843 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5844 if (!s)
a646365c 5845 return -ENOMEM;
c8d77183
SR
5846
5847 trace_seq_init(s);
5848
12883efb 5849 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5850 trace_seq_printf(s, "entries: %ld\n", cnt);
5851
12883efb 5852 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5853 trace_seq_printf(s, "overrun: %ld\n", cnt);
5854
12883efb 5855 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5856 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5857
12883efb 5858 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5859 trace_seq_printf(s, "bytes: %ld\n", cnt);
5860
58e8eedf 5861 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5862 /* local or global for trace_clock */
12883efb 5863 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5864 usec_rem = do_div(t, USEC_PER_SEC);
5865 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5866 t, usec_rem);
5867
12883efb 5868 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5869 usec_rem = do_div(t, USEC_PER_SEC);
5870 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5871 } else {
5872 /* counter or tsc mode for trace_clock */
5873 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5874 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5875
11043d8b 5876 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5877 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5878 }
c64e148a 5879
12883efb 5880 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5881 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5882
12883efb 5883 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5884 trace_seq_printf(s, "read events: %ld\n", cnt);
5885
5ac48378
SRRH
5886 count = simple_read_from_buffer(ubuf, count, ppos,
5887 s->buffer, trace_seq_used(s));
c8d77183
SR
5888
5889 kfree(s);
5890
5891 return count;
5892}
5893
5894static const struct file_operations tracing_stats_fops = {
4d3435b8 5895 .open = tracing_open_generic_tr,
c8d77183 5896 .read = tracing_stats_read,
b444786f 5897 .llseek = generic_file_llseek,
4d3435b8 5898 .release = tracing_release_generic_tr,
c8d77183
SR
5899};
5900
bc0c38d1
SR
5901#ifdef CONFIG_DYNAMIC_FTRACE
5902
b807c3d0
SR
5903int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5904{
5905 return 0;
5906}
5907
bc0c38d1 5908static ssize_t
b807c3d0 5909tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5910 size_t cnt, loff_t *ppos)
5911{
a26a2a27
SR
5912 static char ftrace_dyn_info_buffer[1024];
5913 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5914 unsigned long *p = filp->private_data;
b807c3d0 5915 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5916 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5917 int r;
5918
b807c3d0
SR
5919 mutex_lock(&dyn_info_mutex);
5920 r = sprintf(buf, "%ld ", *p);
4bf39a94 5921
a26a2a27 5922 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5923 buf[r++] = '\n';
5924
5925 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5926
5927 mutex_unlock(&dyn_info_mutex);
5928
5929 return r;
bc0c38d1
SR
5930}
5931
5e2336a0 5932static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5933 .open = tracing_open_generic,
b807c3d0 5934 .read = tracing_read_dyn_info,
b444786f 5935 .llseek = generic_file_llseek,
bc0c38d1 5936};
77fd5c15 5937#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5938
77fd5c15
SRRH
5939#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5940static void
5941ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5942{
5943 tracing_snapshot();
5944}
bc0c38d1 5945
77fd5c15
SRRH
5946static void
5947ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5948{
77fd5c15
SRRH
5949 unsigned long *count = (long *)data;
5950
5951 if (!*count)
5952 return;
bc0c38d1 5953
77fd5c15
SRRH
5954 if (*count != -1)
5955 (*count)--;
5956
5957 tracing_snapshot();
5958}
5959
5960static int
5961ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5962 struct ftrace_probe_ops *ops, void *data)
5963{
5964 long count = (long)data;
5965
5966 seq_printf(m, "%ps:", (void *)ip);
5967
fa6f0cc7 5968 seq_puts(m, "snapshot");
77fd5c15
SRRH
5969
5970 if (count == -1)
fa6f0cc7 5971 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5972 else
5973 seq_printf(m, ":count=%ld\n", count);
5974
5975 return 0;
5976}
5977
5978static struct ftrace_probe_ops snapshot_probe_ops = {
5979 .func = ftrace_snapshot,
5980 .print = ftrace_snapshot_print,
5981};
5982
5983static struct ftrace_probe_ops snapshot_count_probe_ops = {
5984 .func = ftrace_count_snapshot,
5985 .print = ftrace_snapshot_print,
5986};
5987
5988static int
5989ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5990 char *glob, char *cmd, char *param, int enable)
5991{
5992 struct ftrace_probe_ops *ops;
5993 void *count = (void *)-1;
5994 char *number;
5995 int ret;
5996
5997 /* hash funcs only work with set_ftrace_filter */
5998 if (!enable)
5999 return -EINVAL;
6000
6001 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6002
6003 if (glob[0] == '!') {
6004 unregister_ftrace_function_probe_func(glob+1, ops);
6005 return 0;
6006 }
6007
6008 if (!param)
6009 goto out_reg;
6010
6011 number = strsep(&param, ":");
6012
6013 if (!strlen(number))
6014 goto out_reg;
6015
6016 /*
6017 * We use the callback data field (which is a pointer)
6018 * as our counter.
6019 */
6020 ret = kstrtoul(number, 0, (unsigned long *)&count);
6021 if (ret)
6022 return ret;
6023
6024 out_reg:
6025 ret = register_ftrace_function_probe(glob, ops, count);
6026
6027 if (ret >= 0)
6028 alloc_snapshot(&global_trace);
6029
6030 return ret < 0 ? ret : 0;
6031}
6032
6033static struct ftrace_func_command ftrace_snapshot_cmd = {
6034 .name = "snapshot",
6035 .func = ftrace_trace_snapshot_callback,
6036};
6037
38de93ab 6038static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6039{
6040 return register_ftrace_command(&ftrace_snapshot_cmd);
6041}
6042#else
38de93ab 6043static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6044#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6045
7eeafbca 6046static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6047{
8434dc93
SRRH
6048 if (WARN_ON(!tr->dir))
6049 return ERR_PTR(-ENODEV);
6050
6051 /* Top directory uses NULL as the parent */
6052 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6053 return NULL;
6054
6055 /* All sub buffers have a descriptor */
2b6080f2 6056 return tr->dir;
bc0c38d1
SR
6057}
6058
2b6080f2 6059static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6060{
b04cc6b1
FW
6061 struct dentry *d_tracer;
6062
2b6080f2
SR
6063 if (tr->percpu_dir)
6064 return tr->percpu_dir;
b04cc6b1 6065
7eeafbca 6066 d_tracer = tracing_get_dentry(tr);
14a5ae40 6067 if (IS_ERR(d_tracer))
b04cc6b1
FW
6068 return NULL;
6069
8434dc93 6070 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6071
2b6080f2 6072 WARN_ONCE(!tr->percpu_dir,
8434dc93 6073 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6074
2b6080f2 6075 return tr->percpu_dir;
b04cc6b1
FW
6076}
6077
649e9c70
ON
6078static struct dentry *
6079trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6080 void *data, long cpu, const struct file_operations *fops)
6081{
6082 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6083
6084 if (ret) /* See tracing_get_cpu() */
7682c918 6085 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6086 return ret;
6087}
6088
2b6080f2 6089static void
8434dc93 6090tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6091{
2b6080f2 6092 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6093 struct dentry *d_cpu;
dd49a38c 6094 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6095
0a3d7ce7
NK
6096 if (!d_percpu)
6097 return;
6098
dd49a38c 6099 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6100 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6101 if (!d_cpu) {
8434dc93 6102 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6103 return;
6104 }
b04cc6b1 6105
8656e7a2 6106 /* per cpu trace_pipe */
649e9c70 6107 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6108 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6109
6110 /* per cpu trace */
649e9c70 6111 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6112 tr, cpu, &tracing_fops);
7f96f93f 6113
649e9c70 6114 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6115 tr, cpu, &tracing_buffers_fops);
7f96f93f 6116
649e9c70 6117 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6118 tr, cpu, &tracing_stats_fops);
438ced17 6119
649e9c70 6120 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6121 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6122
6123#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6124 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6125 tr, cpu, &snapshot_fops);
6de58e62 6126
649e9c70 6127 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6128 tr, cpu, &snapshot_raw_fops);
f1affcaa 6129#endif
b04cc6b1
FW
6130}
6131
60a11774
SR
6132#ifdef CONFIG_FTRACE_SELFTEST
6133/* Let selftest have access to static functions in this file */
6134#include "trace_selftest.c"
6135#endif
6136
577b785f
SR
6137static ssize_t
6138trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6139 loff_t *ppos)
6140{
6141 struct trace_option_dentry *topt = filp->private_data;
6142 char *buf;
6143
6144 if (topt->flags->val & topt->opt->bit)
6145 buf = "1\n";
6146 else
6147 buf = "0\n";
6148
6149 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6150}
6151
6152static ssize_t
6153trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6154 loff_t *ppos)
6155{
6156 struct trace_option_dentry *topt = filp->private_data;
6157 unsigned long val;
577b785f
SR
6158 int ret;
6159
22fe9b54
PH
6160 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6161 if (ret)
577b785f
SR
6162 return ret;
6163
8d18eaaf
LZ
6164 if (val != 0 && val != 1)
6165 return -EINVAL;
577b785f 6166
8d18eaaf 6167 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6168 mutex_lock(&trace_types_lock);
8c1a49ae 6169 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6170 topt->opt, !val);
577b785f
SR
6171 mutex_unlock(&trace_types_lock);
6172 if (ret)
6173 return ret;
577b785f
SR
6174 }
6175
6176 *ppos += cnt;
6177
6178 return cnt;
6179}
6180
6181
6182static const struct file_operations trace_options_fops = {
6183 .open = tracing_open_generic,
6184 .read = trace_options_read,
6185 .write = trace_options_write,
b444786f 6186 .llseek = generic_file_llseek,
577b785f
SR
6187};
6188
9a38a885
SRRH
6189/*
6190 * In order to pass in both the trace_array descriptor as well as the index
6191 * to the flag that the trace option file represents, the trace_array
6192 * has a character array of trace_flags_index[], which holds the index
6193 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6194 * The address of this character array is passed to the flag option file
6195 * read/write callbacks.
6196 *
6197 * In order to extract both the index and the trace_array descriptor,
6198 * get_tr_index() uses the following algorithm.
6199 *
6200 * idx = *ptr;
6201 *
6202 * As the pointer itself contains the address of the index (remember
6203 * index[1] == 1).
6204 *
6205 * Then to get the trace_array descriptor, by subtracting that index
6206 * from the ptr, we get to the start of the index itself.
6207 *
6208 * ptr - idx == &index[0]
6209 *
6210 * Then a simple container_of() from that pointer gets us to the
6211 * trace_array descriptor.
6212 */
6213static void get_tr_index(void *data, struct trace_array **ptr,
6214 unsigned int *pindex)
6215{
6216 *pindex = *(unsigned char *)data;
6217
6218 *ptr = container_of(data - *pindex, struct trace_array,
6219 trace_flags_index);
6220}
6221
a8259075
SR
6222static ssize_t
6223trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6224 loff_t *ppos)
6225{
9a38a885
SRRH
6226 void *tr_index = filp->private_data;
6227 struct trace_array *tr;
6228 unsigned int index;
a8259075
SR
6229 char *buf;
6230
9a38a885
SRRH
6231 get_tr_index(tr_index, &tr, &index);
6232
6233 if (tr->trace_flags & (1 << index))
a8259075
SR
6234 buf = "1\n";
6235 else
6236 buf = "0\n";
6237
6238 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6239}
6240
6241static ssize_t
6242trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6243 loff_t *ppos)
6244{
9a38a885
SRRH
6245 void *tr_index = filp->private_data;
6246 struct trace_array *tr;
6247 unsigned int index;
a8259075
SR
6248 unsigned long val;
6249 int ret;
6250
9a38a885
SRRH
6251 get_tr_index(tr_index, &tr, &index);
6252
22fe9b54
PH
6253 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6254 if (ret)
a8259075
SR
6255 return ret;
6256
f2d84b65 6257 if (val != 0 && val != 1)
a8259075 6258 return -EINVAL;
69d34da2
SRRH
6259
6260 mutex_lock(&trace_types_lock);
2b6080f2 6261 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6262 mutex_unlock(&trace_types_lock);
a8259075 6263
613f04a0
SRRH
6264 if (ret < 0)
6265 return ret;
6266
a8259075
SR
6267 *ppos += cnt;
6268
6269 return cnt;
6270}
6271
a8259075
SR
6272static const struct file_operations trace_options_core_fops = {
6273 .open = tracing_open_generic,
6274 .read = trace_options_core_read,
6275 .write = trace_options_core_write,
b444786f 6276 .llseek = generic_file_llseek,
a8259075
SR
6277};
6278
5452af66 6279struct dentry *trace_create_file(const char *name,
f4ae40a6 6280 umode_t mode,
5452af66
FW
6281 struct dentry *parent,
6282 void *data,
6283 const struct file_operations *fops)
6284{
6285 struct dentry *ret;
6286
8434dc93 6287 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6288 if (!ret)
8434dc93 6289 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6290
6291 return ret;
6292}
6293
6294
2b6080f2 6295static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6296{
6297 struct dentry *d_tracer;
a8259075 6298
2b6080f2
SR
6299 if (tr->options)
6300 return tr->options;
a8259075 6301
7eeafbca 6302 d_tracer = tracing_get_dentry(tr);
14a5ae40 6303 if (IS_ERR(d_tracer))
a8259075
SR
6304 return NULL;
6305
8434dc93 6306 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6307 if (!tr->options) {
8434dc93 6308 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6309 return NULL;
6310 }
6311
2b6080f2 6312 return tr->options;
a8259075
SR
6313}
6314
577b785f 6315static void
2b6080f2
SR
6316create_trace_option_file(struct trace_array *tr,
6317 struct trace_option_dentry *topt,
577b785f
SR
6318 struct tracer_flags *flags,
6319 struct tracer_opt *opt)
6320{
6321 struct dentry *t_options;
577b785f 6322
2b6080f2 6323 t_options = trace_options_init_dentry(tr);
577b785f
SR
6324 if (!t_options)
6325 return;
6326
6327 topt->flags = flags;
6328 topt->opt = opt;
2b6080f2 6329 topt->tr = tr;
577b785f 6330
5452af66 6331 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6332 &trace_options_fops);
6333
577b785f
SR
6334}
6335
6336static struct trace_option_dentry *
2b6080f2 6337create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6338{
6339 struct trace_option_dentry *topts;
6340 struct tracer_flags *flags;
6341 struct tracer_opt *opts;
6342 int cnt;
6343
6344 if (!tracer)
6345 return NULL;
6346
6347 flags = tracer->flags;
6348
6349 if (!flags || !flags->opts)
6350 return NULL;
6351
6352 opts = flags->opts;
6353
6354 for (cnt = 0; opts[cnt].name; cnt++)
6355 ;
6356
0cfe8245 6357 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6358 if (!topts)
6359 return NULL;
6360
41d9c0be 6361 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 6362 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 6363 &opts[cnt]);
41d9c0be
SRRH
6364 WARN_ONCE(topts[cnt].entry == NULL,
6365 "Failed to create trace option: %s",
6366 opts[cnt].name);
6367 }
577b785f
SR
6368
6369 return topts;
6370}
6371
a8259075 6372static struct dentry *
2b6080f2
SR
6373create_trace_option_core_file(struct trace_array *tr,
6374 const char *option, long index)
a8259075
SR
6375{
6376 struct dentry *t_options;
a8259075 6377
2b6080f2 6378 t_options = trace_options_init_dentry(tr);
a8259075
SR
6379 if (!t_options)
6380 return NULL;
6381
9a38a885
SRRH
6382 return trace_create_file(option, 0644, t_options,
6383 (void *)&tr->trace_flags_index[index],
6384 &trace_options_core_fops);
a8259075
SR
6385}
6386
2b6080f2 6387static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6388{
6389 struct dentry *t_options;
a8259075
SR
6390 int i;
6391
2b6080f2 6392 t_options = trace_options_init_dentry(tr);
a8259075
SR
6393 if (!t_options)
6394 return;
6395
5452af66 6396 for (i = 0; trace_options[i]; i++)
2b6080f2 6397 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6398}
6399
499e5470
SR
6400static ssize_t
6401rb_simple_read(struct file *filp, char __user *ubuf,
6402 size_t cnt, loff_t *ppos)
6403{
348f0fc2 6404 struct trace_array *tr = filp->private_data;
499e5470
SR
6405 char buf[64];
6406 int r;
6407
10246fa3 6408 r = tracer_tracing_is_on(tr);
499e5470
SR
6409 r = sprintf(buf, "%d\n", r);
6410
6411 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6412}
6413
6414static ssize_t
6415rb_simple_write(struct file *filp, const char __user *ubuf,
6416 size_t cnt, loff_t *ppos)
6417{
348f0fc2 6418 struct trace_array *tr = filp->private_data;
12883efb 6419 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6420 unsigned long val;
6421 int ret;
6422
6423 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6424 if (ret)
6425 return ret;
6426
6427 if (buffer) {
2df8f8a6
SR
6428 mutex_lock(&trace_types_lock);
6429 if (val) {
10246fa3 6430 tracer_tracing_on(tr);
2b6080f2
SR
6431 if (tr->current_trace->start)
6432 tr->current_trace->start(tr);
2df8f8a6 6433 } else {
10246fa3 6434 tracer_tracing_off(tr);
2b6080f2
SR
6435 if (tr->current_trace->stop)
6436 tr->current_trace->stop(tr);
2df8f8a6
SR
6437 }
6438 mutex_unlock(&trace_types_lock);
499e5470
SR
6439 }
6440
6441 (*ppos)++;
6442
6443 return cnt;
6444}
6445
6446static const struct file_operations rb_simple_fops = {
7b85af63 6447 .open = tracing_open_generic_tr,
499e5470
SR
6448 .read = rb_simple_read,
6449 .write = rb_simple_write,
7b85af63 6450 .release = tracing_release_generic_tr,
499e5470
SR
6451 .llseek = default_llseek,
6452};
6453
277ba044
SR
6454struct dentry *trace_instance_dir;
6455
6456static void
8434dc93 6457init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6458
55034cd6
SRRH
6459static int
6460allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6461{
6462 enum ring_buffer_flags rb_flags;
737223fb 6463
983f938a 6464 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 6465
dced341b
SRRH
6466 buf->tr = tr;
6467
55034cd6
SRRH
6468 buf->buffer = ring_buffer_alloc(size, rb_flags);
6469 if (!buf->buffer)
6470 return -ENOMEM;
737223fb 6471
55034cd6
SRRH
6472 buf->data = alloc_percpu(struct trace_array_cpu);
6473 if (!buf->data) {
6474 ring_buffer_free(buf->buffer);
6475 return -ENOMEM;
6476 }
737223fb 6477
737223fb
SRRH
6478 /* Allocate the first page for all buffers */
6479 set_buffer_entries(&tr->trace_buffer,
6480 ring_buffer_size(tr->trace_buffer.buffer, 0));
6481
55034cd6
SRRH
6482 return 0;
6483}
737223fb 6484
55034cd6
SRRH
6485static int allocate_trace_buffers(struct trace_array *tr, int size)
6486{
6487 int ret;
737223fb 6488
55034cd6
SRRH
6489 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6490 if (ret)
6491 return ret;
737223fb 6492
55034cd6
SRRH
6493#ifdef CONFIG_TRACER_MAX_TRACE
6494 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6495 allocate_snapshot ? size : 1);
6496 if (WARN_ON(ret)) {
737223fb 6497 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6498 free_percpu(tr->trace_buffer.data);
6499 return -ENOMEM;
6500 }
6501 tr->allocated_snapshot = allocate_snapshot;
737223fb 6502
55034cd6
SRRH
6503 /*
6504 * Only the top level trace array gets its snapshot allocated
6505 * from the kernel command line.
6506 */
6507 allocate_snapshot = false;
737223fb 6508#endif
55034cd6 6509 return 0;
737223fb
SRRH
6510}
6511
f0b70cc4
SRRH
6512static void free_trace_buffer(struct trace_buffer *buf)
6513{
6514 if (buf->buffer) {
6515 ring_buffer_free(buf->buffer);
6516 buf->buffer = NULL;
6517 free_percpu(buf->data);
6518 buf->data = NULL;
6519 }
6520}
6521
23aaa3c1
SRRH
6522static void free_trace_buffers(struct trace_array *tr)
6523{
6524 if (!tr)
6525 return;
6526
f0b70cc4 6527 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6528
6529#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6530 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6531#endif
6532}
6533
9a38a885
SRRH
6534static void init_trace_flags_index(struct trace_array *tr)
6535{
6536 int i;
6537
6538 /* Used by the trace options files */
6539 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6540 tr->trace_flags_index[i] = i;
6541}
6542
eae47358 6543static int instance_mkdir(const char *name)
737223fb 6544{
277ba044
SR
6545 struct trace_array *tr;
6546 int ret;
277ba044
SR
6547
6548 mutex_lock(&trace_types_lock);
6549
6550 ret = -EEXIST;
6551 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6552 if (tr->name && strcmp(tr->name, name) == 0)
6553 goto out_unlock;
6554 }
6555
6556 ret = -ENOMEM;
6557 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6558 if (!tr)
6559 goto out_unlock;
6560
6561 tr->name = kstrdup(name, GFP_KERNEL);
6562 if (!tr->name)
6563 goto out_free_tr;
6564
ccfe9e42
AL
6565 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6566 goto out_free_tr;
6567
983f938a
SRRH
6568 tr->trace_flags = global_trace.trace_flags;
6569
ccfe9e42
AL
6570 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6571
277ba044
SR
6572 raw_spin_lock_init(&tr->start_lock);
6573
0b9b12c1
SRRH
6574 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6575
277ba044
SR
6576 tr->current_trace = &nop_trace;
6577
6578 INIT_LIST_HEAD(&tr->systems);
6579 INIT_LIST_HEAD(&tr->events);
6580
737223fb 6581 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6582 goto out_free_tr;
6583
8434dc93 6584 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6585 if (!tr->dir)
6586 goto out_free_tr;
6587
6588 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6589 if (ret) {
8434dc93 6590 tracefs_remove_recursive(tr->dir);
277ba044 6591 goto out_free_tr;
609e85a7 6592 }
277ba044 6593
8434dc93 6594 init_tracer_tracefs(tr, tr->dir);
9a38a885 6595 init_trace_flags_index(tr);
277ba044
SR
6596
6597 list_add(&tr->list, &ftrace_trace_arrays);
6598
6599 mutex_unlock(&trace_types_lock);
6600
6601 return 0;
6602
6603 out_free_tr:
23aaa3c1 6604 free_trace_buffers(tr);
ccfe9e42 6605 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6606 kfree(tr->name);
6607 kfree(tr);
6608
6609 out_unlock:
6610 mutex_unlock(&trace_types_lock);
6611
6612 return ret;
6613
6614}
6615
eae47358 6616static int instance_rmdir(const char *name)
0c8916c3
SR
6617{
6618 struct trace_array *tr;
6619 int found = 0;
6620 int ret;
6621
6622 mutex_lock(&trace_types_lock);
6623
6624 ret = -ENODEV;
6625 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6626 if (tr->name && strcmp(tr->name, name) == 0) {
6627 found = 1;
6628 break;
6629 }
6630 }
6631 if (!found)
6632 goto out_unlock;
6633
a695cb58 6634 ret = -EBUSY;
cf6ab6d9 6635 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6636 goto out_unlock;
6637
0c8916c3
SR
6638 list_del(&tr->list);
6639
6b450d25 6640 tracing_set_nop(tr);
0c8916c3 6641 event_trace_del_tracer(tr);
591dffda 6642 ftrace_destroy_function_files(tr);
0c8916c3 6643 debugfs_remove_recursive(tr->dir);
a9fcaaac 6644 free_trace_buffers(tr);
0c8916c3
SR
6645
6646 kfree(tr->name);
6647 kfree(tr);
6648
6649 ret = 0;
6650
6651 out_unlock:
6652 mutex_unlock(&trace_types_lock);
6653
6654 return ret;
6655}
6656
277ba044
SR
6657static __init void create_trace_instances(struct dentry *d_tracer)
6658{
eae47358
SRRH
6659 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6660 instance_mkdir,
6661 instance_rmdir);
277ba044
SR
6662 if (WARN_ON(!trace_instance_dir))
6663 return;
277ba044
SR
6664}
6665
2b6080f2 6666static void
8434dc93 6667init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6668{
121aaee7 6669 int cpu;
2b6080f2 6670
607e2ea1
SRRH
6671 trace_create_file("available_tracers", 0444, d_tracer,
6672 tr, &show_traces_fops);
6673
6674 trace_create_file("current_tracer", 0644, d_tracer,
6675 tr, &set_tracer_fops);
6676
ccfe9e42
AL
6677 trace_create_file("tracing_cpumask", 0644, d_tracer,
6678 tr, &tracing_cpumask_fops);
6679
2b6080f2
SR
6680 trace_create_file("trace_options", 0644, d_tracer,
6681 tr, &tracing_iter_fops);
6682
6683 trace_create_file("trace", 0644, d_tracer,
6484c71c 6684 tr, &tracing_fops);
2b6080f2
SR
6685
6686 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6687 tr, &tracing_pipe_fops);
2b6080f2
SR
6688
6689 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6690 tr, &tracing_entries_fops);
2b6080f2
SR
6691
6692 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6693 tr, &tracing_total_entries_fops);
6694
238ae93d 6695 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6696 tr, &tracing_free_buffer_fops);
6697
6698 trace_create_file("trace_marker", 0220, d_tracer,
6699 tr, &tracing_mark_fops);
6700
6701 trace_create_file("trace_clock", 0644, d_tracer, tr,
6702 &trace_clock_fops);
6703
6704 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6705 tr, &rb_simple_fops);
ce9bae55 6706
6d9b3fa5
SRRH
6707#ifdef CONFIG_TRACER_MAX_TRACE
6708 trace_create_file("tracing_max_latency", 0644, d_tracer,
6709 &tr->max_latency, &tracing_max_lat_fops);
6710#endif
6711
591dffda
SRRH
6712 if (ftrace_create_function_files(tr, d_tracer))
6713 WARN(1, "Could not allocate function filter files");
6714
ce9bae55
SRRH
6715#ifdef CONFIG_TRACER_SNAPSHOT
6716 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6717 tr, &snapshot_fops);
ce9bae55 6718#endif
121aaee7
SRRH
6719
6720 for_each_tracing_cpu(cpu)
8434dc93 6721 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6722
2b6080f2
SR
6723}
6724
f76180bc
SRRH
6725static struct vfsmount *trace_automount(void *ingore)
6726{
6727 struct vfsmount *mnt;
6728 struct file_system_type *type;
6729
6730 /*
6731 * To maintain backward compatibility for tools that mount
6732 * debugfs to get to the tracing facility, tracefs is automatically
6733 * mounted to the debugfs/tracing directory.
6734 */
6735 type = get_fs_type("tracefs");
6736 if (!type)
6737 return NULL;
6738 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6739 put_filesystem(type);
6740 if (IS_ERR(mnt))
6741 return NULL;
6742 mntget(mnt);
6743
6744 return mnt;
6745}
6746
7eeafbca
SRRH
6747/**
6748 * tracing_init_dentry - initialize top level trace array
6749 *
6750 * This is called when creating files or directories in the tracing
6751 * directory. It is called via fs_initcall() by any of the boot up code
6752 * and expects to return the dentry of the top level tracing directory.
6753 */
6754struct dentry *tracing_init_dentry(void)
6755{
6756 struct trace_array *tr = &global_trace;
6757
f76180bc 6758 /* The top level trace array uses NULL as parent */
7eeafbca 6759 if (tr->dir)
f76180bc 6760 return NULL;
7eeafbca
SRRH
6761
6762 if (WARN_ON(!debugfs_initialized()))
6763 return ERR_PTR(-ENODEV);
6764
f76180bc
SRRH
6765 /*
6766 * As there may still be users that expect the tracing
6767 * files to exist in debugfs/tracing, we must automount
6768 * the tracefs file system there, so older tools still
6769 * work with the newer kerenl.
6770 */
6771 tr->dir = debugfs_create_automount("tracing", NULL,
6772 trace_automount, NULL);
7eeafbca
SRRH
6773 if (!tr->dir) {
6774 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6775 return ERR_PTR(-ENOMEM);
6776 }
6777
8434dc93 6778 return NULL;
7eeafbca
SRRH
6779}
6780
0c564a53
SRRH
6781extern struct trace_enum_map *__start_ftrace_enum_maps[];
6782extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6783
6784static void __init trace_enum_init(void)
6785{
3673b8e4
SRRH
6786 int len;
6787
6788 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6789 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6790}
6791
6792#ifdef CONFIG_MODULES
6793static void trace_module_add_enums(struct module *mod)
6794{
6795 if (!mod->num_trace_enums)
6796 return;
6797
6798 /*
6799 * Modules with bad taint do not have events created, do
6800 * not bother with enums either.
6801 */
6802 if (trace_module_has_bad_taint(mod))
6803 return;
6804
9828413d 6805 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6806}
6807
9828413d
SRRH
6808#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6809static void trace_module_remove_enums(struct module *mod)
6810{
6811 union trace_enum_map_item *map;
6812 union trace_enum_map_item **last = &trace_enum_maps;
6813
6814 if (!mod->num_trace_enums)
6815 return;
6816
6817 mutex_lock(&trace_enum_mutex);
6818
6819 map = trace_enum_maps;
6820
6821 while (map) {
6822 if (map->head.mod == mod)
6823 break;
6824 map = trace_enum_jmp_to_tail(map);
6825 last = &map->tail.next;
6826 map = map->tail.next;
6827 }
6828 if (!map)
6829 goto out;
6830
6831 *last = trace_enum_jmp_to_tail(map)->tail.next;
6832 kfree(map);
6833 out:
6834 mutex_unlock(&trace_enum_mutex);
6835}
6836#else
6837static inline void trace_module_remove_enums(struct module *mod) { }
6838#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6839
3673b8e4
SRRH
6840static int trace_module_notify(struct notifier_block *self,
6841 unsigned long val, void *data)
6842{
6843 struct module *mod = data;
6844
6845 switch (val) {
6846 case MODULE_STATE_COMING:
6847 trace_module_add_enums(mod);
6848 break;
9828413d
SRRH
6849 case MODULE_STATE_GOING:
6850 trace_module_remove_enums(mod);
6851 break;
3673b8e4
SRRH
6852 }
6853
6854 return 0;
0c564a53
SRRH
6855}
6856
3673b8e4
SRRH
6857static struct notifier_block trace_module_nb = {
6858 .notifier_call = trace_module_notify,
6859 .priority = 0,
6860};
9828413d 6861#endif /* CONFIG_MODULES */
3673b8e4 6862
8434dc93 6863static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6864{
6865 struct dentry *d_tracer;
41d9c0be 6866 struct tracer *t;
bc0c38d1 6867
7e53bd42
LJ
6868 trace_access_lock_init();
6869
bc0c38d1 6870 d_tracer = tracing_init_dentry();
14a5ae40 6871 if (IS_ERR(d_tracer))
ed6f1c99 6872 return 0;
bc0c38d1 6873
8434dc93 6874 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6875
5452af66 6876 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6877 &global_trace, &tracing_thresh_fops);
a8259075 6878
339ae5d3 6879 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6880 NULL, &tracing_readme_fops);
6881
69abe6a5
AP
6882 trace_create_file("saved_cmdlines", 0444, d_tracer,
6883 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6884
939c7a4f
YY
6885 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6886 NULL, &tracing_saved_cmdlines_size_fops);
6887
0c564a53
SRRH
6888 trace_enum_init();
6889
9828413d
SRRH
6890 trace_create_enum_file(d_tracer);
6891
3673b8e4
SRRH
6892#ifdef CONFIG_MODULES
6893 register_module_notifier(&trace_module_nb);
6894#endif
6895
bc0c38d1 6896#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6897 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6898 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6899#endif
b04cc6b1 6900
277ba044 6901 create_trace_instances(d_tracer);
5452af66 6902
2b6080f2 6903 create_trace_options_dir(&global_trace);
b04cc6b1 6904
41d9c0be
SRRH
6905 mutex_lock(&trace_types_lock);
6906 for (t = trace_types; t; t = t->next)
6907 add_tracer_options(&global_trace, t);
6908 mutex_unlock(&trace_types_lock);
09d23a1d 6909
b5ad384e 6910 return 0;
bc0c38d1
SR
6911}
6912
3f5a54e3
SR
6913static int trace_panic_handler(struct notifier_block *this,
6914 unsigned long event, void *unused)
6915{
944ac425 6916 if (ftrace_dump_on_oops)
cecbca96 6917 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6918 return NOTIFY_OK;
6919}
6920
6921static struct notifier_block trace_panic_notifier = {
6922 .notifier_call = trace_panic_handler,
6923 .next = NULL,
6924 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6925};
6926
6927static int trace_die_handler(struct notifier_block *self,
6928 unsigned long val,
6929 void *data)
6930{
6931 switch (val) {
6932 case DIE_OOPS:
944ac425 6933 if (ftrace_dump_on_oops)
cecbca96 6934 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6935 break;
6936 default:
6937 break;
6938 }
6939 return NOTIFY_OK;
6940}
6941
6942static struct notifier_block trace_die_notifier = {
6943 .notifier_call = trace_die_handler,
6944 .priority = 200
6945};
6946
6947/*
6948 * printk is set to max of 1024, we really don't need it that big.
6949 * Nothing should be printing 1000 characters anyway.
6950 */
6951#define TRACE_MAX_PRINT 1000
6952
6953/*
6954 * Define here KERN_TRACE so that we have one place to modify
6955 * it if we decide to change what log level the ftrace dump
6956 * should be at.
6957 */
428aee14 6958#define KERN_TRACE KERN_EMERG
3f5a54e3 6959
955b61e5 6960void
3f5a54e3
SR
6961trace_printk_seq(struct trace_seq *s)
6962{
6963 /* Probably should print a warning here. */
3a161d99
SRRH
6964 if (s->seq.len >= TRACE_MAX_PRINT)
6965 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6966
820b75f6
SRRH
6967 /*
6968 * More paranoid code. Although the buffer size is set to
6969 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6970 * an extra layer of protection.
6971 */
6972 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6973 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6974
6975 /* should be zero ended, but we are paranoid. */
3a161d99 6976 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6977
6978 printk(KERN_TRACE "%s", s->buffer);
6979
f9520750 6980 trace_seq_init(s);
3f5a54e3
SR
6981}
6982
955b61e5
JW
6983void trace_init_global_iter(struct trace_iterator *iter)
6984{
6985 iter->tr = &global_trace;
2b6080f2 6986 iter->trace = iter->tr->current_trace;
ae3b5093 6987 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6988 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6989
6990 if (iter->trace && iter->trace->open)
6991 iter->trace->open(iter);
6992
6993 /* Annotate start of buffers if we had overruns */
6994 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6995 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6996
6997 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6998 if (trace_clocks[iter->tr->clock_id].in_ns)
6999 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
7000}
7001
7fe70b57 7002void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 7003{
3f5a54e3
SR
7004 /* use static because iter can be a bit big for the stack */
7005 static struct trace_iterator iter;
7fe70b57 7006 static atomic_t dump_running;
983f938a 7007 struct trace_array *tr = &global_trace;
cf586b61 7008 unsigned int old_userobj;
d769041f
SR
7009 unsigned long flags;
7010 int cnt = 0, cpu;
3f5a54e3 7011
7fe70b57
SRRH
7012 /* Only allow one dump user at a time. */
7013 if (atomic_inc_return(&dump_running) != 1) {
7014 atomic_dec(&dump_running);
7015 return;
7016 }
3f5a54e3 7017
7fe70b57
SRRH
7018 /*
7019 * Always turn off tracing when we dump.
7020 * We don't need to show trace output of what happens
7021 * between multiple crashes.
7022 *
7023 * If the user does a sysrq-z, then they can re-enable
7024 * tracing with echo 1 > tracing_on.
7025 */
0ee6b6cf 7026 tracing_off();
cf586b61 7027
7fe70b57 7028 local_irq_save(flags);
3f5a54e3 7029
38dbe0b1 7030 /* Simulate the iterator */
955b61e5
JW
7031 trace_init_global_iter(&iter);
7032
d769041f 7033 for_each_tracing_cpu(cpu) {
5e2d5ef8 7034 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
7035 }
7036
983f938a 7037 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 7038
b54d3de9 7039 /* don't look at user memory in panic mode */
983f938a 7040 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 7041
cecbca96
FW
7042 switch (oops_dump_mode) {
7043 case DUMP_ALL:
ae3b5093 7044 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7045 break;
7046 case DUMP_ORIG:
7047 iter.cpu_file = raw_smp_processor_id();
7048 break;
7049 case DUMP_NONE:
7050 goto out_enable;
7051 default:
7052 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7053 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7054 }
7055
7056 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7057
7fe70b57
SRRH
7058 /* Did function tracer already get disabled? */
7059 if (ftrace_is_dead()) {
7060 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7061 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7062 }
7063
3f5a54e3
SR
7064 /*
7065 * We need to stop all tracing on all CPUS to read the
7066 * the next buffer. This is a bit expensive, but is
7067 * not done often. We fill all what we can read,
7068 * and then release the locks again.
7069 */
7070
3f5a54e3
SR
7071 while (!trace_empty(&iter)) {
7072
7073 if (!cnt)
7074 printk(KERN_TRACE "---------------------------------\n");
7075
7076 cnt++;
7077
7078 /* reset all but tr, trace, and overruns */
7079 memset(&iter.seq, 0,
7080 sizeof(struct trace_iterator) -
7081 offsetof(struct trace_iterator, seq));
7082 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7083 iter.pos = -1;
7084
955b61e5 7085 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7086 int ret;
7087
7088 ret = print_trace_line(&iter);
7089 if (ret != TRACE_TYPE_NO_CONSUME)
7090 trace_consume(&iter);
3f5a54e3 7091 }
b892e5c8 7092 touch_nmi_watchdog();
3f5a54e3
SR
7093
7094 trace_printk_seq(&iter.seq);
7095 }
7096
7097 if (!cnt)
7098 printk(KERN_TRACE " (ftrace buffer empty)\n");
7099 else
7100 printk(KERN_TRACE "---------------------------------\n");
7101
cecbca96 7102 out_enable:
983f938a 7103 tr->trace_flags |= old_userobj;
cf586b61 7104
7fe70b57
SRRH
7105 for_each_tracing_cpu(cpu) {
7106 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7107 }
7fe70b57 7108 atomic_dec(&dump_running);
cd891ae0 7109 local_irq_restore(flags);
3f5a54e3 7110}
a8eecf22 7111EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7112
3928a8a2 7113__init static int tracer_alloc_buffers(void)
bc0c38d1 7114{
73c5162a 7115 int ring_buf_size;
9e01c1b7 7116 int ret = -ENOMEM;
4c11d7ae 7117
b5e87c05
SRRH
7118 /*
7119 * Make sure we don't accidently add more trace options
7120 * than we have bits for.
7121 */
9a38a885 7122 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 7123
9e01c1b7
RR
7124 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7125 goto out;
7126
ccfe9e42 7127 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7128 goto out_free_buffer_mask;
4c11d7ae 7129
07d777fe
SR
7130 /* Only allocate trace_printk buffers if a trace_printk exists */
7131 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7132 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7133 trace_printk_init_buffers();
7134
73c5162a
SR
7135 /* To save memory, keep the ring buffer size to its minimum */
7136 if (ring_buffer_expanded)
7137 ring_buf_size = trace_buf_size;
7138 else
7139 ring_buf_size = 1;
7140
9e01c1b7 7141 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7142 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7143
2b6080f2
SR
7144 raw_spin_lock_init(&global_trace.start_lock);
7145
2c4a33ab
SRRH
7146 /* Used for event triggers */
7147 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7148 if (!temp_buffer)
7149 goto out_free_cpumask;
7150
939c7a4f
YY
7151 if (trace_create_savedcmd() < 0)
7152 goto out_free_temp_buffer;
7153
9e01c1b7 7154 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 7155 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7156 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7157 WARN_ON(1);
939c7a4f 7158 goto out_free_savedcmd;
4c11d7ae 7159 }
a7603ff4 7160
499e5470
SR
7161 if (global_trace.buffer_disabled)
7162 tracing_off();
4c11d7ae 7163
e1e232ca
SR
7164 if (trace_boot_clock) {
7165 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7166 if (ret < 0)
7167 pr_warning("Trace clock %s not defined, going back to default\n",
7168 trace_boot_clock);
7169 }
7170
ca164318
SRRH
7171 /*
7172 * register_tracer() might reference current_trace, so it
7173 * needs to be set before we register anything. This is
7174 * just a bootstrap of current_trace anyway.
7175 */
2b6080f2
SR
7176 global_trace.current_trace = &nop_trace;
7177
0b9b12c1
SRRH
7178 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7179
4104d326
SRRH
7180 ftrace_init_global_array_ops(&global_trace);
7181
9a38a885
SRRH
7182 init_trace_flags_index(&global_trace);
7183
ca164318
SRRH
7184 register_tracer(&nop_trace);
7185
60a11774
SR
7186 /* All seems OK, enable tracing */
7187 tracing_disabled = 0;
3928a8a2 7188
3f5a54e3
SR
7189 atomic_notifier_chain_register(&panic_notifier_list,
7190 &trace_panic_notifier);
7191
7192 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7193
ae63b31e
SR
7194 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7195
7196 INIT_LIST_HEAD(&global_trace.systems);
7197 INIT_LIST_HEAD(&global_trace.events);
7198 list_add(&global_trace.list, &ftrace_trace_arrays);
7199
7bcfaf54
SR
7200 while (trace_boot_options) {
7201 char *option;
7202
7203 option = strsep(&trace_boot_options, ",");
2b6080f2 7204 trace_set_options(&global_trace, option);
7bcfaf54
SR
7205 }
7206
77fd5c15
SRRH
7207 register_snapshot_cmd();
7208
2fc1dfbe 7209 return 0;
3f5a54e3 7210
939c7a4f
YY
7211out_free_savedcmd:
7212 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7213out_free_temp_buffer:
7214 ring_buffer_free(temp_buffer);
9e01c1b7 7215out_free_cpumask:
ccfe9e42 7216 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7217out_free_buffer_mask:
7218 free_cpumask_var(tracing_buffer_mask);
7219out:
7220 return ret;
bc0c38d1 7221}
b2821ae6 7222
5f893b26
SRRH
7223void __init trace_init(void)
7224{
0daa2302
SRRH
7225 if (tracepoint_printk) {
7226 tracepoint_print_iter =
7227 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7228 if (WARN_ON(!tracepoint_print_iter))
7229 tracepoint_printk = 0;
7230 }
5f893b26 7231 tracer_alloc_buffers();
0c564a53 7232 trace_event_init();
5f893b26
SRRH
7233}
7234
b2821ae6
SR
7235__init static int clear_boot_tracer(void)
7236{
7237 /*
7238 * The default tracer at boot buffer is an init section.
7239 * This function is called in lateinit. If we did not
7240 * find the boot tracer, then clear it out, to prevent
7241 * later registration from accessing the buffer that is
7242 * about to be freed.
7243 */
7244 if (!default_bootup_tracer)
7245 return 0;
7246
7247 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7248 default_bootup_tracer);
7249 default_bootup_tracer = NULL;
7250
7251 return 0;
7252}
7253
8434dc93 7254fs_initcall(tracer_init_tracefs);
b2821ae6 7255late_initcall(clear_boot_tracer);