/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting it to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)


/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

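/*
 * Descriptive note (added): trace_array_get()/trace_array_put() take and
 * release a reference on a trace_array. The array must be on the
 * ftrace_trace_arrays list; both helpers use trace_types_lock to keep the
 * list and the reference count consistent.
 */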
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

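/*
 * Descriptive note (added): buffer_ftrace_now()/ftrace_now() return the
 * current trace clock value for the given CPU, normalized by the ring
 * buffer. Before the buffers exist (early boot) they fall back to
 * trace_clock_local().
 */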
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

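/*
 * Descriptive note (added): "trace_buf_size=nn[KMG]" on the kernel command
 * line sets the per-CPU ring buffer size; memparse() accepts the usual
 * K/M/G suffixes, and a value of zero is rejected.
 */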
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

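/*
 * Descriptive note (added): wait_on_pipe() blocks a trace_pipe reader until
 * data is available. If the iterator already has static buffer iterators
 * there is nothing to wait for; otherwise the reader sleeps in
 * ring_buffer_wait(), optionally until the buffer is full.
 */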
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags)
			return -ENOMEM;
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

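/*
 * Descriptive note (added): the saved_cmdlines buffer caches the comm of
 * recently traced tasks so that a pid recorded in the ring buffer can later
 * be resolved to a task name. It is a fixed-size map
 * (SAVED_CMDLINES_DEFAULT entries by default) protected by
 * trace_cmdline_lock.
 */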
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

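/*
 * Descriptive note (added): trace_save_cmdline() remembers the comm of @tsk
 * in the saved_cmdlines map. It returns 1 on success, or 0 if the pid is
 * out of range or the cmdline lock could not be taken (it only trylocks to
 * avoid spinning in the tracing fast path).
 */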
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

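/*
 * Descriptive note (added): temp_buffer is a fallback ring buffer used by
 * trace_event_buffer_lock_reserve() when tracing is off but an event has
 * conditional triggers attached: the trigger still needs to see the event
 * data, yet nothing should be recorded in the real buffer.
 */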
static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

c0a0d0d3 1791#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1792
1793#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1794struct ftrace_stack {
1795 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1796};
1797
1798static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1799static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1800
e77405ad 1801static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1802 unsigned long flags,
1fd8df2c 1803 int skip, int pc, struct pt_regs *regs)
86387f7e 1804{
2425bcb9 1805 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 1806 struct ring_buffer_event *event;
777e208d 1807 struct stack_entry *entry;
86387f7e 1808 struct stack_trace trace;
4a9bd3f1
SR
1809 int use_stack;
1810 int size = FTRACE_STACK_ENTRIES;
1811
1812 trace.nr_entries = 0;
1813 trace.skip = skip;
1814
1815 /*
1816 * Since events can happen in NMIs, there's no safe way to
1817 * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
1818 * or NMI comes in, it will just have to use the default
1819 * FTRACE_STACK_SIZE.
1820 */
1821 preempt_disable_notrace();
1822
82146529 1823 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1824 /*
1825 * We don't need any atomic variables, just a barrier.
1826 * If an interrupt comes in, we don't care, because it would
1827 * have exited and put the counter back to what we want.
1828 * We just need a barrier to keep gcc from moving things
1829 * around.
1830 */
1831 barrier();
1832 if (use_stack == 1) {
bdffd893 1833 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1834 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1835
1836 if (regs)
1837 save_stack_trace_regs(regs, &trace);
1838 else
1839 save_stack_trace(&trace);
1840
1841 if (trace.nr_entries > size)
1842 size = trace.nr_entries;
1843 } else
1844 /* From now on, use_stack is a boolean */
1845 use_stack = 0;
1846
1847 size *= sizeof(unsigned long);
86387f7e 1848
e77405ad 1849 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1850 sizeof(*entry) + size, flags, pc);
3928a8a2 1851 if (!event)
4a9bd3f1
SR
1852 goto out;
1853 entry = ring_buffer_event_data(event);
86387f7e 1854
4a9bd3f1
SR
1855 memset(&entry->caller, 0, size);
1856
1857 if (use_stack)
1858 memcpy(&entry->caller, trace.entries,
1859 trace.nr_entries * sizeof(unsigned long));
1860 else {
1861 trace.max_entries = FTRACE_STACK_ENTRIES;
1862 trace.entries = entry->caller;
1863 if (regs)
1864 save_stack_trace_regs(regs, &trace);
1865 else
1866 save_stack_trace(&trace);
1867 }
1868
1869 entry->size = trace.nr_entries;
86387f7e 1870
f306cc82 1871 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1872 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1873
1874 out:
1875 /* Again, don't let gcc optimize things here */
1876 barrier();
82146529 1877 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1878 preempt_enable_notrace();
1879
f0a920d5
IM
1880}
1881
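/*
 * Illustrative sketch (assumed, not part of this file) of the
 * reservation trick used above, reduced to its core: a per-cpu
 * nesting counter decides whether the current caller owns the large
 * per-cpu scratch area or must fall back to a smaller limit.  Names
 * prefixed "example_" are made up for this sketch.
 */
static DEFINE_PER_CPU(int, example_stack_nesting);

static int example_reserve_scratch(void)
{
	int level;

	preempt_disable_notrace();
	level = __this_cpu_inc_return(example_stack_nesting);
	/* Only a compiler barrier is needed; see the comment above. */
	barrier();
	return level == 1;	/* first user on this cpu owns the big buffer */
}

static void example_release_scratch(void)
{
	barrier();
	__this_cpu_dec(example_stack_nesting);
	preempt_enable_notrace();
}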
2d34f489
SRRH
1882static inline void ftrace_trace_stack(struct trace_array *tr,
1883 struct ring_buffer *buffer,
73dddbb5
SRRH
1884 unsigned long flags,
1885 int skip, int pc, struct pt_regs *regs)
53614991 1886{
2d34f489 1887 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
1888 return;
1889
73dddbb5 1890 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1891}
1892
c0a0d0d3
FW
1893void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1894 int pc)
38697053 1895{
12883efb 1896 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1897}
1898
03889384
SR
1899/**
1900 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1901 * @skip: Number of functions to skip (helper handlers)
03889384 1902 */
c142be8e 1903void trace_dump_stack(int skip)
03889384
SR
1904{
1905 unsigned long flags;
1906
1907 if (tracing_disabled || tracing_selftest_running)
e36c5458 1908 return;
03889384
SR
1909
1910 local_save_flags(flags);
1911
c142be8e
SRRH
1912 /*
1913 * Skip 3 more frames; that seems to get us to the caller of
1914 * this function.
1915 */
1916 skip += 3;
1917 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1918 flags, skip, preempt_count(), NULL);
03889384
SR
1919}
1920
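/*
 * Illustrative sketch (assumed usage, not part of this file): kernel
 * code can record a message plus its current call chain by combining
 * trace_printk() with trace_dump_stack().  "example_debug_point" is a
 * hypothetical caller somewhere in kernel code.
 */
static void example_debug_point(void)
{
	trace_printk("reached example_debug_point, dumping call chain\n");
	trace_dump_stack(0);	/* 0: don't skip any additional callers */
}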
91e86e56
SR
1921static DEFINE_PER_CPU(int, user_stack_count);
1922
e77405ad
SR
1923void
1924ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1925{
2425bcb9 1926 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1927 struct ring_buffer_event *event;
02b67518
TE
1928 struct userstack_entry *entry;
1929 struct stack_trace trace;
02b67518 1930
983f938a 1931 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
1932 return;
1933
b6345879
SR
1934 /*
1935 * NMIs cannot handle page faults, even with fixups.
1936 * Saving the user stack can (and often does) fault.
1937 */
1938 if (unlikely(in_nmi()))
1939 return;
02b67518 1940
91e86e56
SR
1941 /*
1942 * prevent recursion, since the user stack tracing may
1943 * trigger other kernel events.
1944 */
1945 preempt_disable();
1946 if (__this_cpu_read(user_stack_count))
1947 goto out;
1948
1949 __this_cpu_inc(user_stack_count);
1950
e77405ad 1951 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1952 sizeof(*entry), flags, pc);
02b67518 1953 if (!event)
1dbd1951 1954 goto out_drop_count;
02b67518 1955 entry = ring_buffer_event_data(event);
02b67518 1956
48659d31 1957 entry->tgid = current->tgid;
02b67518
TE
1958 memset(&entry->caller, 0, sizeof(entry->caller));
1959
1960 trace.nr_entries = 0;
1961 trace.max_entries = FTRACE_STACK_ENTRIES;
1962 trace.skip = 0;
1963 trace.entries = entry->caller;
1964
1965 save_stack_trace_user(&trace);
f306cc82 1966 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1967 __buffer_unlock_commit(buffer, event);
91e86e56 1968
1dbd1951 1969 out_drop_count:
91e86e56 1970 __this_cpu_dec(user_stack_count);
91e86e56
SR
1971 out:
1972 preempt_enable();
02b67518
TE
1973}
1974
4fd27358
HE
1975#ifdef UNUSED
1976static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1977{
7be42151 1978 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1979}
4fd27358 1980#endif /* UNUSED */
02b67518 1981
c0a0d0d3
FW
1982#endif /* CONFIG_STACKTRACE */
1983
07d777fe
SR
1984/* created for use with alloc_percpu */
1985struct trace_buffer_struct {
1986 char buffer[TRACE_BUF_SIZE];
1987};
1988
1989static struct trace_buffer_struct *trace_percpu_buffer;
1990static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1991static struct trace_buffer_struct *trace_percpu_irq_buffer;
1992static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1993
1994/*
1995 * The buffer used is dependent on the context. There is a per-cpu
1996 * buffer for normal context, softirq context, hard irq context and
1997 * for NMI context. This allows for lockless recording.
1998 *
1999 * Note, if the buffers failed to be allocated, then this returns NULL.
2000 */
2001static char *get_trace_buf(void)
2002{
2003 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
2004
2005 /*
2006 * If we have allocated per cpu buffers, then we do not
2007 * need to do any locking.
2008 */
2009 if (in_nmi())
2010 percpu_buffer = trace_percpu_nmi_buffer;
2011 else if (in_irq())
2012 percpu_buffer = trace_percpu_irq_buffer;
2013 else if (in_softirq())
2014 percpu_buffer = trace_percpu_sirq_buffer;
2015 else
2016 percpu_buffer = trace_percpu_buffer;
2017
2018 if (!percpu_buffer)
2019 return NULL;
2020
d8a0349c 2021 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2022}
2023
2024static int alloc_percpu_trace_buffer(void)
2025{
2026 struct trace_buffer_struct *buffers;
2027 struct trace_buffer_struct *sirq_buffers;
2028 struct trace_buffer_struct *irq_buffers;
2029 struct trace_buffer_struct *nmi_buffers;
2030
2031 buffers = alloc_percpu(struct trace_buffer_struct);
2032 if (!buffers)
2033 goto err_warn;
2034
2035 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2036 if (!sirq_buffers)
2037 goto err_sirq;
2038
2039 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2040 if (!irq_buffers)
2041 goto err_irq;
2042
2043 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2044 if (!nmi_buffers)
2045 goto err_nmi;
2046
2047 trace_percpu_buffer = buffers;
2048 trace_percpu_sirq_buffer = sirq_buffers;
2049 trace_percpu_irq_buffer = irq_buffers;
2050 trace_percpu_nmi_buffer = nmi_buffers;
2051
2052 return 0;
2053
2054 err_nmi:
2055 free_percpu(irq_buffers);
2056 err_irq:
2057 free_percpu(sirq_buffers);
2058 err_sirq:
2059 free_percpu(buffers);
2060 err_warn:
2061 WARN(1, "Could not allocate percpu trace_printk buffer");
2062 return -ENOMEM;
2063}
2064
81698831
SR
2065static int buffers_allocated;
2066
07d777fe
SR
2067void trace_printk_init_buffers(void)
2068{
07d777fe
SR
2069 if (buffers_allocated)
2070 return;
2071
2072 if (alloc_percpu_trace_buffer())
2073 return;
2074
2184db46
SR
2075 /* trace_printk() is for debug use only. Don't use it in production. */
2076
69a1c994
BP
2077 pr_warning("\n");
2078 pr_warning("**********************************************************\n");
2184db46
SR
2079 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2080 pr_warning("** **\n");
2081 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2082 pr_warning("** **\n");
2083 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2084 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2085 pr_warning("** **\n");
2086 pr_warning("** If you see this message and you are not debugging **\n");
2087 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2088 pr_warning("** **\n");
2089 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2090 pr_warning("**********************************************************\n");
07d777fe 2091
b382ede6
SR
2092 /* Expand the buffers to set size */
2093 tracing_update_buffers();
2094
07d777fe 2095 buffers_allocated = 1;
81698831
SR
2096
2097 /*
2098 * trace_printk_init_buffers() can be called by modules.
2099 * If that happens, then we need to start cmdline recording
2100 * directly here. If the global_trace.buffer is already
2101 * allocated here, then this was called by module code.
2102 */
12883efb 2103 if (global_trace.trace_buffer.buffer)
81698831
SR
2104 tracing_start_cmdline_record();
2105}
2106
2107void trace_printk_start_comm(void)
2108{
2109 /* Start tracing comms if trace printk is set */
2110 if (!buffers_allocated)
2111 return;
2112 tracing_start_cmdline_record();
2113}
2114
2115static void trace_printk_start_stop_comm(int enabled)
2116{
2117 if (!buffers_allocated)
2118 return;
2119
2120 if (enabled)
2121 tracing_start_cmdline_record();
2122 else
2123 tracing_stop_cmdline_record();
07d777fe
SR
2124}
2125
769b0441 2126/**
48ead020 2127 * trace_vbprintk - write a binary message to the tracing buffer
769b0441
FW
2128 *
 * @ip: address of the call site
 * @fmt: binary printf format string
 * @args: arguments for @fmt
2129 */
40ce74f1 2130int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2131{
2425bcb9 2132 struct trace_event_call *call = &event_bprint;
769b0441 2133 struct ring_buffer_event *event;
e77405ad 2134 struct ring_buffer *buffer;
769b0441 2135 struct trace_array *tr = &global_trace;
48ead020 2136 struct bprint_entry *entry;
769b0441 2137 unsigned long flags;
07d777fe
SR
2138 char *tbuffer;
2139 int len = 0, size, pc;
769b0441
FW
2140
2141 if (unlikely(tracing_selftest_running || tracing_disabled))
2142 return 0;
2143
2144 /* Don't pollute graph traces with trace_vprintk internals */
2145 pause_graph_tracing();
2146
2147 pc = preempt_count();
5168ae50 2148 preempt_disable_notrace();
769b0441 2149
07d777fe
SR
2150 tbuffer = get_trace_buf();
2151 if (!tbuffer) {
2152 len = 0;
769b0441 2153 goto out;
07d777fe 2154 }
769b0441 2155
07d777fe 2156 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2157
07d777fe
SR
2158 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2159 goto out;
769b0441 2160
07d777fe 2161 local_save_flags(flags);
769b0441 2162 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2163 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2164 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2165 flags, pc);
769b0441 2166 if (!event)
07d777fe 2167 goto out;
769b0441
FW
2168 entry = ring_buffer_event_data(event);
2169 entry->ip = ip;
769b0441
FW
2170 entry->fmt = fmt;
2171
07d777fe 2172 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2173 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2174 __buffer_unlock_commit(buffer, event);
2d34f489 2175 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2176 }
769b0441 2177
769b0441 2178out:
5168ae50 2179 preempt_enable_notrace();
769b0441
FW
2180 unpause_graph_tracing();
2181
2182 return len;
2183}
48ead020
FW
2184EXPORT_SYMBOL_GPL(trace_vbprintk);
2185
12883efb
SRRH
2186static int
2187__trace_array_vprintk(struct ring_buffer *buffer,
2188 unsigned long ip, const char *fmt, va_list args)
48ead020 2189{
2425bcb9 2190 struct trace_event_call *call = &event_print;
48ead020 2191 struct ring_buffer_event *event;
07d777fe 2192 int len = 0, size, pc;
48ead020 2193 struct print_entry *entry;
07d777fe
SR
2194 unsigned long flags;
2195 char *tbuffer;
48ead020
FW
2196
2197 if (tracing_disabled || tracing_selftest_running)
2198 return 0;
2199
07d777fe
SR
2200 /* Don't pollute graph traces with trace_vprintk internals */
2201 pause_graph_tracing();
2202
48ead020
FW
2203 pc = preempt_count();
2204 preempt_disable_notrace();
48ead020 2205
07d777fe
SR
2206
2207 tbuffer = get_trace_buf();
2208 if (!tbuffer) {
2209 len = 0;
48ead020 2210 goto out;
07d777fe 2211 }
48ead020 2212
3558a5ac 2213 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2214
07d777fe 2215 local_save_flags(flags);
48ead020 2216 size = sizeof(*entry) + len + 1;
e77405ad 2217 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2218 flags, pc);
48ead020 2219 if (!event)
07d777fe 2220 goto out;
48ead020 2221 entry = ring_buffer_event_data(event);
c13d2f7c 2222 entry->ip = ip;
48ead020 2223
3558a5ac 2224 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2225 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2226 __buffer_unlock_commit(buffer, event);
2d34f489 2227 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2228 }
48ead020
FW
2229 out:
2230 preempt_enable_notrace();
07d777fe 2231 unpause_graph_tracing();
48ead020
FW
2232
2233 return len;
2234}
659372d3 2235
12883efb
SRRH
2236int trace_array_vprintk(struct trace_array *tr,
2237 unsigned long ip, const char *fmt, va_list args)
2238{
2239 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2240}
2241
2242int trace_array_printk(struct trace_array *tr,
2243 unsigned long ip, const char *fmt, ...)
2244{
2245 int ret;
2246 va_list ap;
2247
983f938a 2248 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2249 return 0;
2250
2251 va_start(ap, fmt);
2252 ret = trace_array_vprintk(tr, ip, fmt, ap);
2253 va_end(ap);
2254 return ret;
2255}
2256
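/*
 * Illustrative sketch (assumed usage, not part of this file): kernel
 * code that holds a pointer to a trace_array can log into that
 * instance's buffer with trace_array_printk().  "example_tr" is a
 * hypothetical trace_array pointer; _THIS_IP_ records the call site.
 */
static inline void example_log_value(struct trace_array *example_tr, int val)
{
	trace_array_printk(example_tr, _THIS_IP_, "value is now %d\n", val);
}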
2257int trace_array_printk_buf(struct ring_buffer *buffer,
2258 unsigned long ip, const char *fmt, ...)
2259{
2260 int ret;
2261 va_list ap;
2262
983f938a 2263 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2264 return 0;
2265
2266 va_start(ap, fmt);
2267 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2268 va_end(ap);
2269 return ret;
2270}
2271
659372d3
SR
2272int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2273{
a813a159 2274 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2275}
769b0441
FW
2276EXPORT_SYMBOL_GPL(trace_vprintk);
2277
e2ac8ef5 2278static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2279{
6d158a81
SR
2280 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2281
5a90f577 2282 iter->idx++;
6d158a81
SR
2283 if (buf_iter)
2284 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2285}
2286
e309b41d 2287static struct trace_entry *
bc21b478
SR
2288peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2289 unsigned long *lost_events)
dd0e545f 2290{
3928a8a2 2291 struct ring_buffer_event *event;
6d158a81 2292 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2293
d769041f
SR
2294 if (buf_iter)
2295 event = ring_buffer_iter_peek(buf_iter, ts);
2296 else
12883efb 2297 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2298 lost_events);
d769041f 2299
4a9bd3f1
SR
2300 if (event) {
2301 iter->ent_size = ring_buffer_event_length(event);
2302 return ring_buffer_event_data(event);
2303 }
2304 iter->ent_size = 0;
2305 return NULL;
dd0e545f 2306}
d769041f 2307
dd0e545f 2308static struct trace_entry *
bc21b478
SR
2309__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2310 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2311{
12883efb 2312 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2313 struct trace_entry *ent, *next = NULL;
aa27497c 2314 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2315 int cpu_file = iter->cpu_file;
3928a8a2 2316 u64 next_ts = 0, ts;
bc0c38d1 2317 int next_cpu = -1;
12b5da34 2318 int next_size = 0;
bc0c38d1
SR
2319 int cpu;
2320
b04cc6b1
FW
2321 /*
2322 * If we are in a per_cpu trace file, don't bother iterating over
2323 * all CPUs; peek directly.
2324 */
ae3b5093 2325 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2326 if (ring_buffer_empty_cpu(buffer, cpu_file))
2327 return NULL;
bc21b478 2328 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2329 if (ent_cpu)
2330 *ent_cpu = cpu_file;
2331
2332 return ent;
2333 }
2334
ab46428c 2335 for_each_tracing_cpu(cpu) {
dd0e545f 2336
3928a8a2
SR
2337 if (ring_buffer_empty_cpu(buffer, cpu))
2338 continue;
dd0e545f 2339
bc21b478 2340 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2341
cdd31cd2
IM
2342 /*
2343 * Pick the entry with the smallest timestamp:
2344 */
3928a8a2 2345 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2346 next = ent;
2347 next_cpu = cpu;
3928a8a2 2348 next_ts = ts;
bc21b478 2349 next_lost = lost_events;
12b5da34 2350 next_size = iter->ent_size;
bc0c38d1
SR
2351 }
2352 }
2353
12b5da34
SR
2354 iter->ent_size = next_size;
2355
bc0c38d1
SR
2356 if (ent_cpu)
2357 *ent_cpu = next_cpu;
2358
3928a8a2
SR
2359 if (ent_ts)
2360 *ent_ts = next_ts;
2361
bc21b478
SR
2362 if (missing_events)
2363 *missing_events = next_lost;
2364
bc0c38d1
SR
2365 return next;
2366}
2367
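/*
 * Illustrative sketch (userspace C, not part of this file) of the
 * merge policy implemented above: across several per-cpu streams,
 * always emit the peeked entry with the smallest timestamp.  The
 * "example_stream" type is made up for this sketch.
 */
#include <stddef.h>
#include <stdint.h>

struct example_stream {
	const uint64_t *ts;	/* timestamps, ascending within a stream */
	size_t len;
	size_t pos;
};

/* Return the index of the stream holding the oldest entry, or -1. */
static int example_pick_next(struct example_stream *s, int nr_streams)
{
	uint64_t best_ts = 0;
	int best = -1;
	int i;

	for (i = 0; i < nr_streams; i++) {
		if (s[i].pos >= s[i].len)
			continue;	/* this cpu's stream is exhausted */
		if (best < 0 || s[i].ts[s[i].pos] < best_ts) {
			best = i;
			best_ts = s[i].ts[s[i].pos];
		}
	}
	return best;
}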
dd0e545f 2368/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2369struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2370 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2371{
bc21b478 2372 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2373}
2374
2375/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2376void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2377{
bc21b478
SR
2378 iter->ent = __find_next_entry(iter, &iter->cpu,
2379 &iter->lost_events, &iter->ts);
dd0e545f 2380
3928a8a2 2381 if (iter->ent)
e2ac8ef5 2382 trace_iterator_increment(iter);
dd0e545f 2383
3928a8a2 2384 return iter->ent ? iter : NULL;
b3806b43 2385}
bc0c38d1 2386
e309b41d 2387static void trace_consume(struct trace_iterator *iter)
b3806b43 2388{
12883efb 2389 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2390 &iter->lost_events);
bc0c38d1
SR
2391}
2392
e309b41d 2393static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2394{
2395 struct trace_iterator *iter = m->private;
bc0c38d1 2396 int i = (int)*pos;
4e3c3333 2397 void *ent;
bc0c38d1 2398
a63ce5b3
SR
2399 WARN_ON_ONCE(iter->leftover);
2400
bc0c38d1
SR
2401 (*pos)++;
2402
2403 /* can't go backwards */
2404 if (iter->idx > i)
2405 return NULL;
2406
2407 if (iter->idx < 0)
955b61e5 2408 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2409 else
2410 ent = iter;
2411
2412 while (ent && iter->idx < i)
955b61e5 2413 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2414
2415 iter->pos = *pos;
2416
bc0c38d1
SR
2417 return ent;
2418}
2419
955b61e5 2420void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2421{
2f26ebd5
SR
2422 struct ring_buffer_event *event;
2423 struct ring_buffer_iter *buf_iter;
2424 unsigned long entries = 0;
2425 u64 ts;
2426
12883efb 2427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2428
6d158a81
SR
2429 buf_iter = trace_buffer_iter(iter, cpu);
2430 if (!buf_iter)
2f26ebd5
SR
2431 return;
2432
2f26ebd5
SR
2433 ring_buffer_iter_reset(buf_iter);
2434
2435 /*
2436 * With the max latency tracers, we could have the case
2437 * that a reset never took place on a cpu. This is evident
2438 * from the timestamp being before the start of the buffer.
2439 */
2440 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2441 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2442 break;
2443 entries++;
2444 ring_buffer_read(buf_iter, NULL);
2445 }
2446
12883efb 2447 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2448}
2449
d7350c3f 2450/*
d7350c3f
FW
2451 * The current tracer is copied to avoid using a global lock
2452 * all around.
2453 */
bc0c38d1
SR
2454static void *s_start(struct seq_file *m, loff_t *pos)
2455{
2456 struct trace_iterator *iter = m->private;
2b6080f2 2457 struct trace_array *tr = iter->tr;
b04cc6b1 2458 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2459 void *p = NULL;
2460 loff_t l = 0;
3928a8a2 2461 int cpu;
bc0c38d1 2462
2fd196ec
HT
2463 /*
2464 * Copy the tracer to avoid using a global lock all around.
2465 * iter->trace is a copy of current_trace; the pointer to the
2466 * name may be used instead of a strcmp(), as iter->trace->name
2467 * will point to the same string as current_trace->name.
2468 */
bc0c38d1 2469 mutex_lock(&trace_types_lock);
2b6080f2
SR
2470 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2471 *iter->trace = *tr->current_trace;
d7350c3f 2472 mutex_unlock(&trace_types_lock);
bc0c38d1 2473
12883efb 2474#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2475 if (iter->snapshot && iter->trace->use_max_tr)
2476 return ERR_PTR(-EBUSY);
12883efb 2477#endif
debdd57f
HT
2478
2479 if (!iter->snapshot)
2480 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2481
bc0c38d1
SR
2482 if (*pos != iter->pos) {
2483 iter->ent = NULL;
2484 iter->cpu = 0;
2485 iter->idx = -1;
2486
ae3b5093 2487 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2488 for_each_tracing_cpu(cpu)
2f26ebd5 2489 tracing_iter_reset(iter, cpu);
b04cc6b1 2490 } else
2f26ebd5 2491 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2492
ac91d854 2493 iter->leftover = 0;
bc0c38d1
SR
2494 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2495 ;
2496
2497 } else {
a63ce5b3
SR
2498 /*
2499 * If we overflowed the seq_file before, then we want
2500 * to just reuse the trace_seq buffer again.
2501 */
2502 if (iter->leftover)
2503 p = iter;
2504 else {
2505 l = *pos - 1;
2506 p = s_next(m, p, &l);
2507 }
bc0c38d1
SR
2508 }
2509
4f535968 2510 trace_event_read_lock();
7e53bd42 2511 trace_access_lock(cpu_file);
bc0c38d1
SR
2512 return p;
2513}
2514
2515static void s_stop(struct seq_file *m, void *p)
2516{
7e53bd42
LJ
2517 struct trace_iterator *iter = m->private;
2518
12883efb 2519#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2520 if (iter->snapshot && iter->trace->use_max_tr)
2521 return;
12883efb 2522#endif
debdd57f
HT
2523
2524 if (!iter->snapshot)
2525 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2526
7e53bd42 2527 trace_access_unlock(iter->cpu_file);
4f535968 2528 trace_event_read_unlock();
bc0c38d1
SR
2529}
2530
39eaf7ef 2531static void
12883efb
SRRH
2532get_total_entries(struct trace_buffer *buf,
2533 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2534{
2535 unsigned long count;
2536 int cpu;
2537
2538 *total = 0;
2539 *entries = 0;
2540
2541 for_each_tracing_cpu(cpu) {
12883efb 2542 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2543 /*
2544 * If this buffer has skipped entries, then we hold all
2545 * entries for the trace and we need to ignore the
2546 * ones before the time stamp.
2547 */
12883efb
SRRH
2548 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2549 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2550 /* total is the same as the entries */
2551 *total += count;
2552 } else
2553 *total += count +
12883efb 2554 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2555 *entries += count;
2556 }
2557}
2558
e309b41d 2559static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2560{
d79ac28f
RV
2561 seq_puts(m, "# _------=> CPU# \n"
2562 "# / _-----=> irqs-off \n"
2563 "# | / _----=> need-resched \n"
2564 "# || / _---=> hardirq/softirq \n"
2565 "# ||| / _--=> preempt-depth \n"
2566 "# |||| / delay \n"
2567 "# cmd pid ||||| time | caller \n"
2568 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2569}
2570
12883efb 2571static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2572{
39eaf7ef
SR
2573 unsigned long total;
2574 unsigned long entries;
2575
12883efb 2576 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2577 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2578 entries, total, num_online_cpus());
2579 seq_puts(m, "#\n");
2580}
2581
12883efb 2582static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2583{
12883efb 2584 print_event_info(buf, m);
d79ac28f
RV
2585 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2586 "# | | | | |\n");
bc0c38d1
SR
2587}
2588
12883efb 2589static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2590{
12883efb 2591 print_event_info(buf, m);
d79ac28f
RV
2592 seq_puts(m, "# _-----=> irqs-off\n"
2593 "# / _----=> need-resched\n"
2594 "# | / _---=> hardirq/softirq\n"
2595 "# || / _--=> preempt-depth\n"
2596 "# ||| / delay\n"
2597 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2598 "# | | | |||| | |\n");
77271ce4 2599}
bc0c38d1 2600
62b915f1 2601void
bc0c38d1
SR
2602print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2603{
983f938a 2604 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2605 struct trace_buffer *buf = iter->trace_buffer;
2606 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2607 struct tracer *type = iter->trace;
39eaf7ef
SR
2608 unsigned long entries;
2609 unsigned long total;
bc0c38d1
SR
2610 const char *name = "preemption";
2611
d840f718 2612 name = type->name;
bc0c38d1 2613
12883efb 2614 get_total_entries(buf, &total, &entries);
bc0c38d1 2615
888b55dc 2616 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2617 name, UTS_RELEASE);
888b55dc 2618 seq_puts(m, "# -----------------------------------"
bc0c38d1 2619 "---------------------------------\n");
888b55dc 2620 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2621 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2622 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2623 entries,
4c11d7ae 2624 total,
12883efb 2625 buf->cpu,
bc0c38d1
SR
2626#if defined(CONFIG_PREEMPT_NONE)
2627 "server",
2628#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2629 "desktop",
b5c21b45 2630#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2631 "preempt",
2632#else
2633 "unknown",
2634#endif
2635 /* These are reserved for later use */
2636 0, 0, 0, 0);
2637#ifdef CONFIG_SMP
2638 seq_printf(m, " #P:%d)\n", num_online_cpus());
2639#else
2640 seq_puts(m, ")\n");
2641#endif
888b55dc
KM
2642 seq_puts(m, "# -----------------\n");
2643 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2644 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2645 data->comm, data->pid,
2646 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2647 data->policy, data->rt_priority);
888b55dc 2648 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2649
2650 if (data->critical_start) {
888b55dc 2651 seq_puts(m, "# => started at: ");
214023c3
SR
2652 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2653 trace_print_seq(m, &iter->seq);
888b55dc 2654 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2655 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2656 trace_print_seq(m, &iter->seq);
8248ac05 2657 seq_puts(m, "\n#\n");
bc0c38d1
SR
2658 }
2659
888b55dc 2660 seq_puts(m, "#\n");
bc0c38d1
SR
2661}
2662
a309720c
SR
2663static void test_cpu_buff_start(struct trace_iterator *iter)
2664{
2665 struct trace_seq *s = &iter->seq;
983f938a 2666 struct trace_array *tr = iter->tr;
a309720c 2667
983f938a 2668 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
2669 return;
2670
2671 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2672 return;
2673
919cd979 2674 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2675 return;
2676
12883efb 2677 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2678 return;
2679
919cd979
SL
2680 if (iter->started)
2681 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2682
2683 /* Don't print started cpu buffer for the first entry of the trace */
2684 if (iter->idx > 1)
2685 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2686 iter->cpu);
a309720c
SR
2687}
2688
2c4f035f 2689static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2690{
983f938a 2691 struct trace_array *tr = iter->tr;
214023c3 2692 struct trace_seq *s = &iter->seq;
983f938a 2693 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2694 struct trace_entry *entry;
f633cef0 2695 struct trace_event *event;
bc0c38d1 2696
4e3c3333 2697 entry = iter->ent;
dd0e545f 2698
a309720c
SR
2699 test_cpu_buff_start(iter);
2700
c4a8e8be 2701 event = ftrace_find_event(entry->type);
bc0c38d1 2702
983f938a 2703 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2704 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2705 trace_print_lat_context(iter);
2706 else
2707 trace_print_context(iter);
c4a8e8be 2708 }
bc0c38d1 2709
19a7fe20
SRRH
2710 if (trace_seq_has_overflowed(s))
2711 return TRACE_TYPE_PARTIAL_LINE;
2712
268ccda0 2713 if (event)
a9a57763 2714 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2715
19a7fe20 2716 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2717
19a7fe20 2718 return trace_handle_return(s);
bc0c38d1
SR
2719}
2720
2c4f035f 2721static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 2722{
983f938a 2723 struct trace_array *tr = iter->tr;
f9896bf3
IM
2724 struct trace_seq *s = &iter->seq;
2725 struct trace_entry *entry;
f633cef0 2726 struct trace_event *event;
f9896bf3
IM
2727
2728 entry = iter->ent;
dd0e545f 2729
983f938a 2730 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
2731 trace_seq_printf(s, "%d %d %llu ",
2732 entry->pid, iter->cpu, iter->ts);
2733
2734 if (trace_seq_has_overflowed(s))
2735 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2736
f633cef0 2737 event = ftrace_find_event(entry->type);
268ccda0 2738 if (event)
a9a57763 2739 return event->funcs->raw(iter, 0, event);
d9793bd8 2740
19a7fe20 2741 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2742
19a7fe20 2743 return trace_handle_return(s);
f9896bf3
IM
2744}
2745
2c4f035f 2746static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 2747{
983f938a 2748 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
2749 struct trace_seq *s = &iter->seq;
2750 unsigned char newline = '\n';
2751 struct trace_entry *entry;
f633cef0 2752 struct trace_event *event;
5e3ca0ec
IM
2753
2754 entry = iter->ent;
dd0e545f 2755
983f938a 2756 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2757 SEQ_PUT_HEX_FIELD(s, entry->pid);
2758 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2759 SEQ_PUT_HEX_FIELD(s, iter->ts);
2760 if (trace_seq_has_overflowed(s))
2761 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2762 }
5e3ca0ec 2763
f633cef0 2764 event = ftrace_find_event(entry->type);
268ccda0 2765 if (event) {
a9a57763 2766 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2767 if (ret != TRACE_TYPE_HANDLED)
2768 return ret;
2769 }
7104f300 2770
19a7fe20 2771 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2772
19a7fe20 2773 return trace_handle_return(s);
5e3ca0ec
IM
2774}
2775
2c4f035f 2776static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 2777{
983f938a 2778 struct trace_array *tr = iter->tr;
cb0f12aa
IM
2779 struct trace_seq *s = &iter->seq;
2780 struct trace_entry *entry;
f633cef0 2781 struct trace_event *event;
cb0f12aa
IM
2782
2783 entry = iter->ent;
dd0e545f 2784
983f938a 2785 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2786 SEQ_PUT_FIELD(s, entry->pid);
2787 SEQ_PUT_FIELD(s, iter->cpu);
2788 SEQ_PUT_FIELD(s, iter->ts);
2789 if (trace_seq_has_overflowed(s))
2790 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2791 }
cb0f12aa 2792
f633cef0 2793 event = ftrace_find_event(entry->type);
a9a57763
SR
2794 return event ? event->funcs->binary(iter, 0, event) :
2795 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2796}
2797
62b915f1 2798int trace_empty(struct trace_iterator *iter)
bc0c38d1 2799{
6d158a81 2800 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2801 int cpu;
2802
9aba60fe 2803 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2804 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2805 cpu = iter->cpu_file;
6d158a81
SR
2806 buf_iter = trace_buffer_iter(iter, cpu);
2807 if (buf_iter) {
2808 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2809 return 0;
2810 } else {
12883efb 2811 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2812 return 0;
2813 }
2814 return 1;
2815 }
2816
ab46428c 2817 for_each_tracing_cpu(cpu) {
6d158a81
SR
2818 buf_iter = trace_buffer_iter(iter, cpu);
2819 if (buf_iter) {
2820 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2821 return 0;
2822 } else {
12883efb 2823 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2824 return 0;
2825 }
bc0c38d1 2826 }
d769041f 2827
797d3712 2828 return 1;
bc0c38d1
SR
2829}
2830
4f535968 2831/* Called with trace_event_read_lock() held. */
955b61e5 2832enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2833{
983f938a
SRRH
2834 struct trace_array *tr = iter->tr;
2835 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
2836 enum print_line_t ret;
2837
19a7fe20
SRRH
2838 if (iter->lost_events) {
2839 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2840 iter->cpu, iter->lost_events);
2841 if (trace_seq_has_overflowed(&iter->seq))
2842 return TRACE_TYPE_PARTIAL_LINE;
2843 }
bc21b478 2844
2c4f035f
FW
2845 if (iter->trace && iter->trace->print_line) {
2846 ret = iter->trace->print_line(iter);
2847 if (ret != TRACE_TYPE_UNHANDLED)
2848 return ret;
2849 }
72829bc3 2850
09ae7234
SRRH
2851 if (iter->ent->type == TRACE_BPUTS &&
2852 trace_flags & TRACE_ITER_PRINTK &&
2853 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2854 return trace_print_bputs_msg_only(iter);
2855
48ead020
FW
2856 if (iter->ent->type == TRACE_BPRINT &&
2857 trace_flags & TRACE_ITER_PRINTK &&
2858 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2859 return trace_print_bprintk_msg_only(iter);
48ead020 2860
66896a85
FW
2861 if (iter->ent->type == TRACE_PRINT &&
2862 trace_flags & TRACE_ITER_PRINTK &&
2863 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2864 return trace_print_printk_msg_only(iter);
66896a85 2865
cb0f12aa
IM
2866 if (trace_flags & TRACE_ITER_BIN)
2867 return print_bin_fmt(iter);
2868
5e3ca0ec
IM
2869 if (trace_flags & TRACE_ITER_HEX)
2870 return print_hex_fmt(iter);
2871
f9896bf3
IM
2872 if (trace_flags & TRACE_ITER_RAW)
2873 return print_raw_fmt(iter);
2874
f9896bf3
IM
2875 return print_trace_fmt(iter);
2876}
2877
7e9a49ef
JO
2878void trace_latency_header(struct seq_file *m)
2879{
2880 struct trace_iterator *iter = m->private;
983f938a 2881 struct trace_array *tr = iter->tr;
7e9a49ef
JO
2882
2883 /* print nothing if the buffers are empty */
2884 if (trace_empty(iter))
2885 return;
2886
2887 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2888 print_trace_header(m, iter);
2889
983f938a 2890 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
2891 print_lat_help_header(m);
2892}
2893
62b915f1
JO
2894void trace_default_header(struct seq_file *m)
2895{
2896 struct trace_iterator *iter = m->private;
983f938a
SRRH
2897 struct trace_array *tr = iter->tr;
2898 unsigned long trace_flags = tr->trace_flags;
62b915f1 2899
f56e7f8e
JO
2900 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2901 return;
2902
62b915f1
JO
2903 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2904 /* print nothing if the buffers are empty */
2905 if (trace_empty(iter))
2906 return;
2907 print_trace_header(m, iter);
2908 if (!(trace_flags & TRACE_ITER_VERBOSE))
2909 print_lat_help_header(m);
2910 } else {
77271ce4
SR
2911 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2912 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2913 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2914 else
12883efb 2915 print_func_help_header(iter->trace_buffer, m);
77271ce4 2916 }
62b915f1
JO
2917 }
2918}
2919
e0a413f6
SR
2920static void test_ftrace_alive(struct seq_file *m)
2921{
2922 if (!ftrace_is_dead())
2923 return;
d79ac28f
RV
2924 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2925 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2926}
2927
d8741e2e 2928#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2929static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2930{
d79ac28f
RV
2931 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2932 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2933 "# Takes a snapshot of the main buffer.\n"
2934 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2935 "# (Doesn't have to be '2' works with any number that\n"
2936 "# is not a '0' or '1')\n");
d8741e2e 2937}
f1affcaa
SRRH
2938
2939static void show_snapshot_percpu_help(struct seq_file *m)
2940{
fa6f0cc7 2941 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2942#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2943 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2944 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2945#else
d79ac28f
RV
2946 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2947 "# Must use main snapshot file to allocate.\n");
f1affcaa 2948#endif
d79ac28f
RV
2949 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2950 "# (Doesn't have to be '2' works with any number that\n"
2951 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2952}
2953
d8741e2e
SRRH
2954static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2955{
45ad21ca 2956 if (iter->tr->allocated_snapshot)
fa6f0cc7 2957 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2958 else
fa6f0cc7 2959 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2960
fa6f0cc7 2961 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2962 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2963 show_snapshot_main_help(m);
2964 else
2965 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2966}
2967#else
2968/* Should never be called */
2969static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2970#endif
2971
bc0c38d1
SR
2972static int s_show(struct seq_file *m, void *v)
2973{
2974 struct trace_iterator *iter = v;
a63ce5b3 2975 int ret;
bc0c38d1
SR
2976
2977 if (iter->ent == NULL) {
2978 if (iter->tr) {
2979 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2980 seq_puts(m, "#\n");
e0a413f6 2981 test_ftrace_alive(m);
bc0c38d1 2982 }
d8741e2e
SRRH
2983 if (iter->snapshot && trace_empty(iter))
2984 print_snapshot_help(m, iter);
2985 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2986 iter->trace->print_header(m);
62b915f1
JO
2987 else
2988 trace_default_header(m);
2989
a63ce5b3
SR
2990 } else if (iter->leftover) {
2991 /*
2992 * If we filled the seq_file buffer earlier, we
2993 * want to just show it now.
2994 */
2995 ret = trace_print_seq(m, &iter->seq);
2996
2997 /* ret should this time be zero, but you never know */
2998 iter->leftover = ret;
2999
bc0c38d1 3000 } else {
f9896bf3 3001 print_trace_line(iter);
a63ce5b3
SR
3002 ret = trace_print_seq(m, &iter->seq);
3003 /*
3004 * If we overflow the seq_file buffer, then it will
3005 * ask us for this data again at start up.
3006 * Use that instead.
3007 * ret is 0 if seq_file write succeeded.
3008 * -1 otherwise.
3009 */
3010 iter->leftover = ret;
bc0c38d1
SR
3011 }
3012
3013 return 0;
3014}
3015
649e9c70
ON
3016/*
3017 * Should be used after trace_array_get(); trace_types_lock
3018 * ensures that i_cdev was already initialized.
3019 */
3020static inline int tracing_get_cpu(struct inode *inode)
3021{
3022 if (inode->i_cdev) /* See trace_create_cpu_file() */
3023 return (long)inode->i_cdev - 1;
3024 return RING_BUFFER_ALL_CPUS;
3025}
3026
88e9d34c 3027static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3028 .start = s_start,
3029 .next = s_next,
3030 .stop = s_stop,
3031 .show = s_show,
bc0c38d1
SR
3032};
3033
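/*
 * Illustrative sketch (assumed, not part of this file) of the
 * seq_file contract the operations above implement: the seq_file
 * core drives the iteration roughly like this (greatly simplified;
 * buffering, error handling and position tracking are omitted).
 */
static void example_seq_walk(struct seq_file *m,
			     const struct seq_operations *ops)
{
	loff_t pos = 0;
	void *v = ops->start(m, &pos);

	while (v && !IS_ERR(v)) {
		if (ops->show(m, v))
			break;
		v = ops->next(m, v, &pos);
	}
	ops->stop(m, v);
}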
e309b41d 3034static struct trace_iterator *
6484c71c 3035__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3036{
6484c71c 3037 struct trace_array *tr = inode->i_private;
bc0c38d1 3038 struct trace_iterator *iter;
50e18b94 3039 int cpu;
bc0c38d1 3040
85a2f9b4
SR
3041 if (tracing_disabled)
3042 return ERR_PTR(-ENODEV);
60a11774 3043
50e18b94 3044 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3045 if (!iter)
3046 return ERR_PTR(-ENOMEM);
bc0c38d1 3047
72917235 3048 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3049 GFP_KERNEL);
93574fcc
DC
3050 if (!iter->buffer_iter)
3051 goto release;
3052
d7350c3f
FW
3053 /*
3054 * We make a copy of the current tracer to avoid concurrent
3055 * changes to it while we are reading.
3056 */
bc0c38d1 3057 mutex_lock(&trace_types_lock);
d7350c3f 3058 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3059 if (!iter->trace)
d7350c3f 3060 goto fail;
85a2f9b4 3061
2b6080f2 3062 *iter->trace = *tr->current_trace;
d7350c3f 3063
79f55997 3064 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3065 goto fail;
3066
12883efb
SRRH
3067 iter->tr = tr;
3068
3069#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3070 /* Currently only the top directory has a snapshot */
3071 if (tr->current_trace->print_max || snapshot)
12883efb 3072 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3073 else
12883efb
SRRH
3074#endif
3075 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3076 iter->snapshot = snapshot;
bc0c38d1 3077 iter->pos = -1;
6484c71c 3078 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3079 mutex_init(&iter->mutex);
bc0c38d1 3080
8bba1bf5
MM
3081 /* Notify the tracer early; before we stop tracing. */
3082 if (iter->trace && iter->trace->open)
a93751ca 3083 iter->trace->open(iter);
8bba1bf5 3084
12ef7d44 3085 /* Annotate start of buffers if we had overruns */
12883efb 3086 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3087 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3088
8be0709f 3089 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3090 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3091 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3092
debdd57f
HT
3093 /* stop the trace while dumping if we are not opening "snapshot" */
3094 if (!iter->snapshot)
2b6080f2 3095 tracing_stop_tr(tr);
2f26ebd5 3096
ae3b5093 3097 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3098 for_each_tracing_cpu(cpu) {
b04cc6b1 3099 iter->buffer_iter[cpu] =
12883efb 3100 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3101 }
3102 ring_buffer_read_prepare_sync();
3103 for_each_tracing_cpu(cpu) {
3104 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3105 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3106 }
3107 } else {
3108 cpu = iter->cpu_file;
3928a8a2 3109 iter->buffer_iter[cpu] =
12883efb 3110 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3111 ring_buffer_read_prepare_sync();
3112 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3113 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3114 }
3115
bc0c38d1
SR
3116 mutex_unlock(&trace_types_lock);
3117
bc0c38d1 3118 return iter;
3928a8a2 3119
d7350c3f 3120 fail:
3928a8a2 3121 mutex_unlock(&trace_types_lock);
d7350c3f 3122 kfree(iter->trace);
6d158a81 3123 kfree(iter->buffer_iter);
93574fcc 3124release:
50e18b94
JO
3125 seq_release_private(inode, file);
3126 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3127}
3128
3129int tracing_open_generic(struct inode *inode, struct file *filp)
3130{
60a11774
SR
3131 if (tracing_disabled)
3132 return -ENODEV;
3133
bc0c38d1
SR
3134 filp->private_data = inode->i_private;
3135 return 0;
3136}
3137
2e86421d
GB
3138bool tracing_is_disabled(void)
3139{
3140 return (tracing_disabled) ? true : false;
3141}
3142
7b85af63
SRRH
3143/*
3144 * Open and update trace_array ref count.
3145 * Must have the current trace_array passed to it.
3146 */
dcc30223 3147static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3148{
3149 struct trace_array *tr = inode->i_private;
3150
3151 if (tracing_disabled)
3152 return -ENODEV;
3153
3154 if (trace_array_get(tr) < 0)
3155 return -ENODEV;
3156
3157 filp->private_data = inode->i_private;
3158
3159 return 0;
7b85af63
SRRH
3160}
3161
4fd27358 3162static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3163{
6484c71c 3164 struct trace_array *tr = inode->i_private;
907f2784 3165 struct seq_file *m = file->private_data;
4acd4d00 3166 struct trace_iterator *iter;
3928a8a2 3167 int cpu;
bc0c38d1 3168
ff451961 3169 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3170 trace_array_put(tr);
4acd4d00 3171 return 0;
ff451961 3172 }
4acd4d00 3173
6484c71c 3174 /* Writes do not use seq_file */
4acd4d00 3175 iter = m->private;
bc0c38d1 3176 mutex_lock(&trace_types_lock);
a695cb58 3177
3928a8a2
SR
3178 for_each_tracing_cpu(cpu) {
3179 if (iter->buffer_iter[cpu])
3180 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3181 }
3182
bc0c38d1
SR
3183 if (iter->trace && iter->trace->close)
3184 iter->trace->close(iter);
3185
debdd57f
HT
3186 if (!iter->snapshot)
3187 /* reenable tracing if it was previously enabled */
2b6080f2 3188 tracing_start_tr(tr);
f77d09a3
AL
3189
3190 __trace_array_put(tr);
3191
bc0c38d1
SR
3192 mutex_unlock(&trace_types_lock);
3193
d7350c3f 3194 mutex_destroy(&iter->mutex);
b0dfa978 3195 free_cpumask_var(iter->started);
d7350c3f 3196 kfree(iter->trace);
6d158a81 3197 kfree(iter->buffer_iter);
50e18b94 3198 seq_release_private(inode, file);
ff451961 3199
bc0c38d1
SR
3200 return 0;
3201}
3202
7b85af63
SRRH
3203static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3204{
3205 struct trace_array *tr = inode->i_private;
3206
3207 trace_array_put(tr);
bc0c38d1
SR
3208 return 0;
3209}
3210
7b85af63
SRRH
3211static int tracing_single_release_tr(struct inode *inode, struct file *file)
3212{
3213 struct trace_array *tr = inode->i_private;
3214
3215 trace_array_put(tr);
3216
3217 return single_release(inode, file);
3218}
3219
bc0c38d1
SR
3220static int tracing_open(struct inode *inode, struct file *file)
3221{
6484c71c 3222 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3223 struct trace_iterator *iter;
3224 int ret = 0;
bc0c38d1 3225
ff451961
SRRH
3226 if (trace_array_get(tr) < 0)
3227 return -ENODEV;
3228
4acd4d00 3229 /* If this file was open for write, then erase contents */
6484c71c
ON
3230 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3231 int cpu = tracing_get_cpu(inode);
3232
3233 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3234 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3235 else
6484c71c 3236 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3237 }
bc0c38d1 3238
4acd4d00 3239 if (file->f_mode & FMODE_READ) {
6484c71c 3240 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3241 if (IS_ERR(iter))
3242 ret = PTR_ERR(iter);
983f938a 3243 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3244 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3245 }
ff451961
SRRH
3246
3247 if (ret < 0)
3248 trace_array_put(tr);
3249
bc0c38d1
SR
3250 return ret;
3251}
3252
607e2ea1
SRRH
3253/*
3254 * Some tracers are not suitable for instance buffers.
3255 * A tracer is always available for the global array (toplevel)
3256 * or if it explicitly states that it is.
3257 */
3258static bool
3259trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3260{
3261 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3262}
3263
3264/* Find the next tracer that this trace array may use */
3265static struct tracer *
3266get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3267{
3268 while (t && !trace_ok_for_array(t, tr))
3269 t = t->next;
3270
3271 return t;
3272}
3273
e309b41d 3274static void *
bc0c38d1
SR
3275t_next(struct seq_file *m, void *v, loff_t *pos)
3276{
607e2ea1 3277 struct trace_array *tr = m->private;
f129e965 3278 struct tracer *t = v;
bc0c38d1
SR
3279
3280 (*pos)++;
3281
3282 if (t)
607e2ea1 3283 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3284
bc0c38d1
SR
3285 return t;
3286}
3287
3288static void *t_start(struct seq_file *m, loff_t *pos)
3289{
607e2ea1 3290 struct trace_array *tr = m->private;
f129e965 3291 struct tracer *t;
bc0c38d1
SR
3292 loff_t l = 0;
3293
3294 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3295
3296 t = get_tracer_for_array(tr, trace_types);
3297 for (; t && l < *pos; t = t_next(m, t, &l))
3298 ;
bc0c38d1
SR
3299
3300 return t;
3301}
3302
3303static void t_stop(struct seq_file *m, void *p)
3304{
3305 mutex_unlock(&trace_types_lock);
3306}
3307
3308static int t_show(struct seq_file *m, void *v)
3309{
3310 struct tracer *t = v;
3311
3312 if (!t)
3313 return 0;
3314
fa6f0cc7 3315 seq_puts(m, t->name);
bc0c38d1
SR
3316 if (t->next)
3317 seq_putc(m, ' ');
3318 else
3319 seq_putc(m, '\n');
3320
3321 return 0;
3322}
3323
88e9d34c 3324static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3325 .start = t_start,
3326 .next = t_next,
3327 .stop = t_stop,
3328 .show = t_show,
bc0c38d1
SR
3329};
3330
3331static int show_traces_open(struct inode *inode, struct file *file)
3332{
607e2ea1
SRRH
3333 struct trace_array *tr = inode->i_private;
3334 struct seq_file *m;
3335 int ret;
3336
60a11774
SR
3337 if (tracing_disabled)
3338 return -ENODEV;
3339
607e2ea1
SRRH
3340 ret = seq_open(file, &show_traces_seq_ops);
3341 if (ret)
3342 return ret;
3343
3344 m = file->private_data;
3345 m->private = tr;
3346
3347 return 0;
bc0c38d1
SR
3348}
3349
4acd4d00
SR
3350static ssize_t
3351tracing_write_stub(struct file *filp, const char __user *ubuf,
3352 size_t count, loff_t *ppos)
3353{
3354 return count;
3355}
3356
098c879e 3357loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3358{
098c879e
SRRH
3359 int ret;
3360
364829b1 3361 if (file->f_mode & FMODE_READ)
098c879e 3362 ret = seq_lseek(file, offset, whence);
364829b1 3363 else
098c879e
SRRH
3364 file->f_pos = ret = 0;
3365
3366 return ret;
364829b1
SP
3367}
3368
5e2336a0 3369static const struct file_operations tracing_fops = {
4bf39a94
IM
3370 .open = tracing_open,
3371 .read = seq_read,
4acd4d00 3372 .write = tracing_write_stub,
098c879e 3373 .llseek = tracing_lseek,
4bf39a94 3374 .release = tracing_release,
bc0c38d1
SR
3375};
3376
5e2336a0 3377static const struct file_operations show_traces_fops = {
c7078de1
IM
3378 .open = show_traces_open,
3379 .read = seq_read,
3380 .release = seq_release,
b444786f 3381 .llseek = seq_lseek,
c7078de1
IM
3382};
3383
36dfe925
IM
3384/*
3385 * The tracer itself will not take this lock, but still we want
3386 * to provide a consistent cpumask to user-space:
3387 */
3388static DEFINE_MUTEX(tracing_cpumask_update_lock);
3389
3390/*
3391 * Temporary storage for the character representation of the
3392 * CPU bitmask (and one more byte for the newline):
3393 */
3394static char mask_str[NR_CPUS + 1];
3395
c7078de1
IM
3396static ssize_t
3397tracing_cpumask_read(struct file *filp, char __user *ubuf,
3398 size_t count, loff_t *ppos)
3399{
ccfe9e42 3400 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3401 int len;
c7078de1
IM
3402
3403 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3404
1a40243b
TH
3405 len = snprintf(mask_str, count, "%*pb\n",
3406 cpumask_pr_args(tr->tracing_cpumask));
3407 if (len >= count) {
36dfe925
IM
3408 count = -EINVAL;
3409 goto out_err;
3410 }
36dfe925
IM
3411 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3412
3413out_err:
c7078de1
IM
3414 mutex_unlock(&tracing_cpumask_update_lock);
3415
3416 return count;
3417}
3418
3419static ssize_t
3420tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3421 size_t count, loff_t *ppos)
3422{
ccfe9e42 3423 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3424 cpumask_var_t tracing_cpumask_new;
2b6080f2 3425 int err, cpu;
9e01c1b7
RR
3426
3427 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3428 return -ENOMEM;
c7078de1 3429
9e01c1b7 3430 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3431 if (err)
36dfe925
IM
3432 goto err_unlock;
3433
215368e8
LZ
3434 mutex_lock(&tracing_cpumask_update_lock);
3435
a5e25883 3436 local_irq_disable();
0b9b12c1 3437 arch_spin_lock(&tr->max_lock);
ab46428c 3438 for_each_tracing_cpu(cpu) {
36dfe925
IM
3439 /*
3440 * Increase/decrease the disabled counter if we are
3441 * about to flip a bit in the cpumask:
3442 */
ccfe9e42 3443 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3444 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3445 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3446 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3447 }
ccfe9e42 3448 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3449 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3450 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3451 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3452 }
3453 }
0b9b12c1 3454 arch_spin_unlock(&tr->max_lock);
a5e25883 3455 local_irq_enable();
36dfe925 3456
ccfe9e42 3457 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3458
3459 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3460 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3461
3462 return count;
36dfe925
IM
3463
3464err_unlock:
215368e8 3465 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3466
3467 return err;
c7078de1
IM
3468}
3469
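/*
 * Illustrative sketch (userspace C, assumed usage): restrict tracing
 * to CPUs 0 and 1 by writing a hex mask to the tracing_cpumask file
 * served by the handlers above.  The mount point below is an
 * assumption; tracefs may also be mounted at /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_cpumask", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_cpumask");
		return 1;
	}
	if (write(fd, "3\n", 2) != 2)	/* hex mask 0x3 = CPUs 0 and 1 */
		perror("write tracing_cpumask");
	close(fd);
	return 0;
}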
5e2336a0 3470static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3471 .open = tracing_open_generic_tr,
c7078de1
IM
3472 .read = tracing_cpumask_read,
3473 .write = tracing_cpumask_write,
ccfe9e42 3474 .release = tracing_release_generic_tr,
b444786f 3475 .llseek = generic_file_llseek,
bc0c38d1
SR
3476};
3477
fdb372ed 3478static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3479{
d8e83d26 3480 struct tracer_opt *trace_opts;
2b6080f2 3481 struct trace_array *tr = m->private;
d8e83d26 3482 u32 tracer_flags;
d8e83d26 3483 int i;
adf9f195 3484
d8e83d26 3485 mutex_lock(&trace_types_lock);
2b6080f2
SR
3486 tracer_flags = tr->current_trace->flags->val;
3487 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3488
bc0c38d1 3489 for (i = 0; trace_options[i]; i++) {
983f938a 3490 if (tr->trace_flags & (1 << i))
fdb372ed 3491 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3492 else
fdb372ed 3493 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3494 }
3495
adf9f195
FW
3496 for (i = 0; trace_opts[i].name; i++) {
3497 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3498 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3499 else
fdb372ed 3500 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3501 }
d8e83d26 3502 mutex_unlock(&trace_types_lock);
adf9f195 3503
fdb372ed 3504 return 0;
bc0c38d1 3505}
bc0c38d1 3506
8c1a49ae 3507static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3508 struct tracer_flags *tracer_flags,
3509 struct tracer_opt *opts, int neg)
3510{
d39cdd20 3511 struct tracer *trace = tracer_flags->trace;
8d18eaaf 3512 int ret;
bc0c38d1 3513
8c1a49ae 3514 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3515 if (ret)
3516 return ret;
3517
3518 if (neg)
3519 tracer_flags->val &= ~opts->bit;
3520 else
3521 tracer_flags->val |= opts->bit;
3522 return 0;
bc0c38d1
SR
3523}
3524
adf9f195 3525/* Try to assign a tracer specific option */
8c1a49ae 3526static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3527{
8c1a49ae 3528 struct tracer *trace = tr->current_trace;
7770841e 3529 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3530 struct tracer_opt *opts = NULL;
8d18eaaf 3531 int i;
adf9f195 3532
7770841e
Z
3533 for (i = 0; tracer_flags->opts[i].name; i++) {
3534 opts = &tracer_flags->opts[i];
adf9f195 3535
8d18eaaf 3536 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3537 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3538 }
adf9f195 3539
8d18eaaf 3540 return -EINVAL;
adf9f195
FW
3541}
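/*
 * Example: tracer-specific flags are set through the same trace_options
 * file as the generic ones, with "no" prepended to clear a flag. For
 * instance, while the function tracer is the current tracer (path
 * assumes the usual debugfs mount):
 *
 *   # echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 *   # echo nofunc_stack_trace > /sys/kernel/debug/tracing/trace_options
 */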
3542
613f04a0
SRRH
3543/* Some tracers require overwrite to stay enabled */
3544int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3545{
3546 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3547 return -1;
3548
3549 return 0;
3550}
3551
2b6080f2 3552int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3553{
3554 /* do nothing if flag is already set */
983f938a 3555 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
3556 return 0;
3557
3558 /* Give the tracer a chance to approve the change */
2b6080f2 3559 if (tr->current_trace->flag_changed)
bf6065b5 3560 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3561 return -EINVAL;
af4617bd
SR
3562
3563 if (enabled)
983f938a 3564 tr->trace_flags |= mask;
af4617bd 3565 else
983f938a 3566 tr->trace_flags &= ~mask;
e870e9a1
LZ
3567
3568 if (mask == TRACE_ITER_RECORD_CMD)
3569 trace_event_enable_cmd_record(enabled);
750912fa 3570
80902822 3571 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3572 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3573#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3574 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3575#endif
3576 }
81698831 3577
b9f9108c 3578 if (mask == TRACE_ITER_PRINTK) {
81698831 3579 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3580 trace_printk_control(enabled);
3581 }
613f04a0
SRRH
3582
3583 return 0;
af4617bd
SR
3584}
3585
2b6080f2 3586static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3587{
8d18eaaf 3588 char *cmp;
bc0c38d1 3589 int neg = 0;
613f04a0 3590 int ret = -ENODEV;
bc0c38d1 3591 int i;
a4d1e688 3592 size_t orig_len = strlen(option);
bc0c38d1 3593
7bcfaf54 3594 cmp = strstrip(option);
bc0c38d1 3595
8d18eaaf 3596 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3597 neg = 1;
3598 cmp += 2;
3599 }
3600
69d34da2
SRRH
3601 mutex_lock(&trace_types_lock);
3602
bc0c38d1 3603 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3604 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3605 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3606 break;
3607 }
3608 }
adf9f195
FW
3609
3610 /* If no option could be set, test the specific tracer options */
69d34da2 3611 if (!trace_options[i])
8c1a49ae 3612 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3613
3614 mutex_unlock(&trace_types_lock);
bc0c38d1 3615
a4d1e688
JW
3616 /*
3617 * If the first trailing whitespace is replaced with '\0' by strstrip,
3618 * turn it back into a space.
3619 */
3620 if (orig_len > strlen(option))
3621 option[strlen(option)] = ' ';
3622
7bcfaf54
SR
3623 return ret;
3624}
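/*
 * Example writes that end up in trace_set_options(), assuming the usual
 * debugfs mount point:
 *
 *   # echo print-parent > /sys/kernel/debug/tracing/trace_options
 *   # echo nooverwrite > /sys/kernel/debug/tracing/trace_options
 */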
3625
a4d1e688
JW
3626static void __init apply_trace_boot_options(void)
3627{
3628 char *buf = trace_boot_options_buf;
3629 char *option;
3630
3631 while (true) {
3632 option = strsep(&buf, ",");
3633
3634 if (!option)
3635 break;
a4d1e688 3636
43ed3843
SRRH
3637 if (*option)
3638 trace_set_options(&global_trace, option);
a4d1e688
JW
3639
3640 /* Put back the comma to allow this to be called again */
3641 if (buf)
3642 *(buf - 1) = ',';
3643 }
3644}
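/*
 * Example: the same option strings can be passed on the kernel command
 * line and are applied at boot by the function above, e.g.:
 *
 *   trace_options=sym-addr,stacktrace,noprint-parent
 */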
3645
7bcfaf54
SR
3646static ssize_t
3647tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3648 size_t cnt, loff_t *ppos)
3649{
2b6080f2
SR
3650 struct seq_file *m = filp->private_data;
3651 struct trace_array *tr = m->private;
7bcfaf54 3652 char buf[64];
613f04a0 3653 int ret;
7bcfaf54
SR
3654
3655 if (cnt >= sizeof(buf))
3656 return -EINVAL;
3657
3658 if (copy_from_user(&buf, ubuf, cnt))
3659 return -EFAULT;
3660
a8dd2176
SR
3661 buf[cnt] = 0;
3662
2b6080f2 3663 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3664 if (ret < 0)
3665 return ret;
7bcfaf54 3666
cf8517cf 3667 *ppos += cnt;
bc0c38d1
SR
3668
3669 return cnt;
3670}
3671
fdb372ed
LZ
3672static int tracing_trace_options_open(struct inode *inode, struct file *file)
3673{
7b85af63 3674 struct trace_array *tr = inode->i_private;
f77d09a3 3675 int ret;
7b85af63 3676
fdb372ed
LZ
3677 if (tracing_disabled)
3678 return -ENODEV;
2b6080f2 3679
7b85af63
SRRH
3680 if (trace_array_get(tr) < 0)
3681 return -ENODEV;
3682
f77d09a3
AL
3683 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3684 if (ret < 0)
3685 trace_array_put(tr);
3686
3687 return ret;
fdb372ed
LZ
3688}
3689
5e2336a0 3690static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3691 .open = tracing_trace_options_open,
3692 .read = seq_read,
3693 .llseek = seq_lseek,
7b85af63 3694 .release = tracing_single_release_tr,
ee6bce52 3695 .write = tracing_trace_options_write,
bc0c38d1
SR
3696};
3697
7bd2f24c
IM
3698static const char readme_msg[] =
3699 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3700 "# echo 0 > tracing_on : quick way to disable tracing\n"
3701 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3702 " Important files:\n"
3703 " trace\t\t\t- The static contents of the buffer\n"
3704 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3705 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3706 " current_tracer\t- function and latency tracers\n"
3707 " available_tracers\t- list of configured tracers for current_tracer\n"
3708 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3709 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3710 " trace_clock\t\t-change the clock used to order events\n"
3711 " local: Per cpu clock but may not be synced across CPUs\n"
3712 " global: Synced across CPUs but slows tracing down.\n"
3713 " counter: Not a clock, but just an increment\n"
3714 " uptime: Jiffy counter from time of boot\n"
3715 " perf: Same clock that perf events use\n"
3716#ifdef CONFIG_X86_64
3717 " x86-tsc: TSC cycle counter\n"
3718#endif
3719 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3720 " tracing_cpumask\t- Limit which CPUs to trace\n"
3721 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3722 "\t\t\t Remove sub-buffer with rmdir\n"
3723 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3724 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3725 "\t\t\t option name\n"
939c7a4f 3726 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3727#ifdef CONFIG_DYNAMIC_FTRACE
3728 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3729 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3730 "\t\t\t functions\n"
3731 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3732 "\t modules: Can select a group via module\n"
3733 "\t Format: :mod:<module-name>\n"
3734 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3735 "\t triggers: a command to perform when function is hit\n"
3736 "\t Format: <function>:<trigger>[:count]\n"
3737 "\t trigger: traceon, traceoff\n"
3738 "\t\t enable_event:<system>:<event>\n"
3739 "\t\t disable_event:<system>:<event>\n"
22f45649 3740#ifdef CONFIG_STACKTRACE
71485c45 3741 "\t\t stacktrace\n"
22f45649
SRRH
3742#endif
3743#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3744 "\t\t snapshot\n"
22f45649 3745#endif
17a280ea
SRRH
3746 "\t\t dump\n"
3747 "\t\t cpudump\n"
71485c45
SRRH
3748 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3749 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3750 "\t The first one will disable tracing every time do_fault is hit\n"
3751 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3752 "\t The first time do trap is hit and it disables tracing, the\n"
3753 "\t counter will decrement to 2. If tracing is already disabled,\n"
3754 "\t the counter will not decrement. It only decrements when the\n"
3755 "\t trigger did work\n"
3756 "\t To remove trigger without count:\n"
3757 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3758 "\t To remove trigger with a count:\n"
3759 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3760 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3761 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3762 "\t modules: Can select a group via module command :mod:\n"
3763 "\t Does not accept triggers\n"
22f45649
SRRH
3764#endif /* CONFIG_DYNAMIC_FTRACE */
3765#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3766 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3767 "\t\t (function)\n"
22f45649
SRRH
3768#endif
3769#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3770 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3771 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3772 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3773#endif
3774#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3775 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3776 "\t\t\t snapshot buffer. Read the contents for more\n"
3777 "\t\t\t information\n"
22f45649 3778#endif
991821c8 3779#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3780 " stack_trace\t\t- Shows the max stack trace when active\n"
3781 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3782 "\t\t\t Write into this file to reset the max size (trigger a\n"
3783 "\t\t\t new trace)\n"
22f45649 3784#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3785 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3786 "\t\t\t traces\n"
22f45649 3787#endif
991821c8 3788#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3789 " events/\t\t- Directory containing all trace event subsystems:\n"
3790 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3791 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3792 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3793 "\t\t\t events\n"
26f25564 3794 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3795 " events/<system>/<event>/\t- Directory containing control files for\n"
3796 "\t\t\t <event>:\n"
26f25564
TZ
3797 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3798 " filter\t\t- If set, only events passing filter are traced\n"
3799 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3800 "\t Format: <trigger>[:count][if <filter>]\n"
3801 "\t trigger: traceon, traceoff\n"
3802 "\t enable_event:<system>:<event>\n"
3803 "\t disable_event:<system>:<event>\n"
26f25564 3804#ifdef CONFIG_STACKTRACE
71485c45 3805 "\t\t stacktrace\n"
26f25564
TZ
3806#endif
3807#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3808 "\t\t snapshot\n"
26f25564 3809#endif
71485c45
SRRH
3810 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3811 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3812 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3813 "\t events/block/block_unplug/trigger\n"
3814 "\t The first disables tracing every time block_unplug is hit.\n"
3815 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3816 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3817 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3818 "\t Like function triggers, the counter is only decremented if it\n"
3819 "\t enabled or disabled tracing.\n"
3820 "\t To remove a trigger without a count:\n"
3821 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3822 "\t To remove a trigger with a count:\n"
3823 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3824 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3825;
3826
3827static ssize_t
3828tracing_readme_read(struct file *filp, char __user *ubuf,
3829 size_t cnt, loff_t *ppos)
3830{
3831 return simple_read_from_buffer(ubuf, cnt, ppos,
3832 readme_msg, strlen(readme_msg));
3833}
3834
5e2336a0 3835static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3836 .open = tracing_open_generic,
3837 .read = tracing_readme_read,
b444786f 3838 .llseek = generic_file_llseek,
7bd2f24c
IM
3839};
3840
42584c81
YY
3841static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3842{
3843 unsigned int *ptr = v;
69abe6a5 3844
42584c81
YY
3845 if (*pos || m->count)
3846 ptr++;
69abe6a5 3847
42584c81 3848 (*pos)++;
69abe6a5 3849
939c7a4f
YY
3850 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3851 ptr++) {
42584c81
YY
3852 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3853 continue;
69abe6a5 3854
42584c81
YY
3855 return ptr;
3856 }
69abe6a5 3857
42584c81
YY
3858 return NULL;
3859}
3860
3861static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3862{
3863 void *v;
3864 loff_t l = 0;
69abe6a5 3865
4c27e756
SRRH
3866 preempt_disable();
3867 arch_spin_lock(&trace_cmdline_lock);
3868
939c7a4f 3869 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3870 while (l <= *pos) {
3871 v = saved_cmdlines_next(m, v, &l);
3872 if (!v)
3873 return NULL;
69abe6a5
AP
3874 }
3875
42584c81
YY
3876 return v;
3877}
3878
3879static void saved_cmdlines_stop(struct seq_file *m, void *v)
3880{
4c27e756
SRRH
3881 arch_spin_unlock(&trace_cmdline_lock);
3882 preempt_enable();
42584c81 3883}
69abe6a5 3884
42584c81
YY
3885static int saved_cmdlines_show(struct seq_file *m, void *v)
3886{
3887 char buf[TASK_COMM_LEN];
3888 unsigned int *pid = v;
69abe6a5 3889
4c27e756 3890 __trace_find_cmdline(*pid, buf);
42584c81
YY
3891 seq_printf(m, "%d %s\n", *pid, buf);
3892 return 0;
3893}
3894
3895static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3896 .start = saved_cmdlines_start,
3897 .next = saved_cmdlines_next,
3898 .stop = saved_cmdlines_stop,
3899 .show = saved_cmdlines_show,
3900};
3901
3902static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3903{
3904 if (tracing_disabled)
3905 return -ENODEV;
3906
3907 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3908}
3909
3910static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3911 .open = tracing_saved_cmdlines_open,
3912 .read = seq_read,
3913 .llseek = seq_lseek,
3914 .release = seq_release,
69abe6a5
AP
3915};
3916
939c7a4f
YY
3917static ssize_t
3918tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3919 size_t cnt, loff_t *ppos)
3920{
3921 char buf[64];
3922 int r;
3923
3924 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3925 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3926 arch_spin_unlock(&trace_cmdline_lock);
3927
3928 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3929}
3930
3931static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3932{
3933 kfree(s->saved_cmdlines);
3934 kfree(s->map_cmdline_to_pid);
3935 kfree(s);
3936}
3937
3938static int tracing_resize_saved_cmdlines(unsigned int val)
3939{
3940 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3941
a6af8fbf 3942 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3943 if (!s)
3944 return -ENOMEM;
3945
3946 if (allocate_cmdlines_buffer(val, s) < 0) {
3947 kfree(s);
3948 return -ENOMEM;
3949 }
3950
3951 arch_spin_lock(&trace_cmdline_lock);
3952 savedcmd_temp = savedcmd;
3953 savedcmd = s;
3954 arch_spin_unlock(&trace_cmdline_lock);
3955 free_saved_cmdlines_buffer(savedcmd_temp);
3956
3957 return 0;
3958}
3959
3960static ssize_t
3961tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3962 size_t cnt, loff_t *ppos)
3963{
3964 unsigned long val;
3965 int ret;
3966
3967 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3968 if (ret)
3969 return ret;
3970
 3971 /* must have at least 1 entry and not exceed PID_MAX_DEFAULT */
3972 if (!val || val > PID_MAX_DEFAULT)
3973 return -EINVAL;
3974
3975 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3976 if (ret < 0)
3977 return ret;
3978
3979 *ppos += cnt;
3980
3981 return cnt;
3982}
3983
3984static const struct file_operations tracing_saved_cmdlines_size_fops = {
3985 .open = tracing_open_generic,
3986 .read = tracing_saved_cmdlines_size_read,
3987 .write = tracing_saved_cmdlines_size_write,
3988};
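/*
 * Example usage of the saved_cmdlines_size file backed by the fops
 * above (the cache typically defaults to 128 entries); growing it keeps
 * more pid->comm mappings available for trace output:
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines_size
 *   # echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 */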
3989
9828413d
SRRH
3990#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3991static union trace_enum_map_item *
3992update_enum_map(union trace_enum_map_item *ptr)
3993{
3994 if (!ptr->map.enum_string) {
3995 if (ptr->tail.next) {
3996 ptr = ptr->tail.next;
3997 /* Set ptr to the next real item (skip head) */
3998 ptr++;
3999 } else
4000 return NULL;
4001 }
4002 return ptr;
4003}
4004
4005static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4006{
4007 union trace_enum_map_item *ptr = v;
4008
4009 /*
4010 * Paranoid! If ptr points to end, we don't want to increment past it.
4011 * This really should never happen.
4012 */
4013 ptr = update_enum_map(ptr);
4014 if (WARN_ON_ONCE(!ptr))
4015 return NULL;
4016
4017 ptr++;
4018
4019 (*pos)++;
4020
4021 ptr = update_enum_map(ptr);
4022
4023 return ptr;
4024}
4025
4026static void *enum_map_start(struct seq_file *m, loff_t *pos)
4027{
4028 union trace_enum_map_item *v;
4029 loff_t l = 0;
4030
4031 mutex_lock(&trace_enum_mutex);
4032
4033 v = trace_enum_maps;
4034 if (v)
4035 v++;
4036
4037 while (v && l < *pos) {
4038 v = enum_map_next(m, v, &l);
4039 }
4040
4041 return v;
4042}
4043
4044static void enum_map_stop(struct seq_file *m, void *v)
4045{
4046 mutex_unlock(&trace_enum_mutex);
4047}
4048
4049static int enum_map_show(struct seq_file *m, void *v)
4050{
4051 union trace_enum_map_item *ptr = v;
4052
4053 seq_printf(m, "%s %ld (%s)\n",
4054 ptr->map.enum_string, ptr->map.enum_value,
4055 ptr->map.system);
4056
4057 return 0;
4058}
4059
4060static const struct seq_operations tracing_enum_map_seq_ops = {
4061 .start = enum_map_start,
4062 .next = enum_map_next,
4063 .stop = enum_map_stop,
4064 .show = enum_map_show,
4065};
4066
4067static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4068{
4069 if (tracing_disabled)
4070 return -ENODEV;
4071
4072 return seq_open(filp, &tracing_enum_map_seq_ops);
4073}
4074
4075static const struct file_operations tracing_enum_map_fops = {
4076 .open = tracing_enum_map_open,
4077 .read = seq_read,
4078 .llseek = seq_lseek,
4079 .release = seq_release,
4080};
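/*
 * Example: when CONFIG_TRACE_ENUM_MAP_FILE is set, the seq_file above is
 * exposed as the enum_map file, and each line is emitted by
 * enum_map_show() in the form:
 *
 *   <enum string> <value> (<event system>)
 */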
4081
4082static inline union trace_enum_map_item *
4083trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4084{
4085 /* Return tail of array given the head */
4086 return ptr + ptr->head.length + 1;
4087}
4088
4089static void
4090trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4091 int len)
4092{
4093 struct trace_enum_map **stop;
4094 struct trace_enum_map **map;
4095 union trace_enum_map_item *map_array;
4096 union trace_enum_map_item *ptr;
4097
4098 stop = start + len;
4099
4100 /*
4101 * The trace_enum_maps contains the map plus a head and tail item,
4102 * where the head holds the module and length of array, and the
4103 * tail holds a pointer to the next list.
4104 */
4105 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4106 if (!map_array) {
4107 pr_warning("Unable to allocate trace enum mapping\n");
4108 return;
4109 }
4110
4111 mutex_lock(&trace_enum_mutex);
4112
4113 if (!trace_enum_maps)
4114 trace_enum_maps = map_array;
4115 else {
4116 ptr = trace_enum_maps;
4117 for (;;) {
4118 ptr = trace_enum_jmp_to_tail(ptr);
4119 if (!ptr->tail.next)
4120 break;
4121 ptr = ptr->tail.next;
4122
4123 }
4124 ptr->tail.next = map_array;
4125 }
4126 map_array->head.mod = mod;
4127 map_array->head.length = len;
4128 map_array++;
4129
4130 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4131 map_array->map = **map;
4132 map_array++;
4133 }
4134 memset(map_array, 0, sizeof(*map_array));
4135
4136 mutex_unlock(&trace_enum_mutex);
4137}
4138
4139static void trace_create_enum_file(struct dentry *d_tracer)
4140{
4141 trace_create_file("enum_map", 0444, d_tracer,
4142 NULL, &tracing_enum_map_fops);
4143}
4144
4145#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4146static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4147static inline void trace_insert_enum_map_file(struct module *mod,
4148 struct trace_enum_map **start, int len) { }
4149#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4150
4151static void trace_insert_enum_map(struct module *mod,
4152 struct trace_enum_map **start, int len)
0c564a53
SRRH
4153{
4154 struct trace_enum_map **map;
0c564a53
SRRH
4155
4156 if (len <= 0)
4157 return;
4158
4159 map = start;
4160
4161 trace_event_enum_update(map, len);
9828413d
SRRH
4162
4163 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4164}
4165
bc0c38d1
SR
4166static ssize_t
4167tracing_set_trace_read(struct file *filp, char __user *ubuf,
4168 size_t cnt, loff_t *ppos)
4169{
2b6080f2 4170 struct trace_array *tr = filp->private_data;
ee6c2c1b 4171 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4172 int r;
4173
4174 mutex_lock(&trace_types_lock);
2b6080f2 4175 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4176 mutex_unlock(&trace_types_lock);
4177
4bf39a94 4178 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4179}
4180
b6f11df2
ACM
4181int tracer_init(struct tracer *t, struct trace_array *tr)
4182{
12883efb 4183 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4184 return t->init(tr);
4185}
4186
12883efb 4187static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4188{
4189 int cpu;
737223fb 4190
438ced17 4191 for_each_tracing_cpu(cpu)
12883efb 4192 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4193}
4194
12883efb 4195#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4196/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4197static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4198 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4199{
4200 int cpu, ret = 0;
4201
4202 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4203 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4204 ret = ring_buffer_resize(trace_buf->buffer,
4205 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4206 if (ret < 0)
4207 break;
12883efb
SRRH
4208 per_cpu_ptr(trace_buf->data, cpu)->entries =
4209 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4210 }
4211 } else {
12883efb
SRRH
4212 ret = ring_buffer_resize(trace_buf->buffer,
4213 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4214 if (ret == 0)
12883efb
SRRH
4215 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4216 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4217 }
4218
4219 return ret;
4220}
12883efb 4221#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4222
2b6080f2
SR
4223static int __tracing_resize_ring_buffer(struct trace_array *tr,
4224 unsigned long size, int cpu)
73c5162a
SR
4225{
4226 int ret;
4227
4228 /*
4229 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4230 * we use the size that was given, and we can forget about
4231 * expanding it later.
73c5162a 4232 */
55034cd6 4233 ring_buffer_expanded = true;
73c5162a 4234
b382ede6 4235 /* May be called before buffers are initialized */
12883efb 4236 if (!tr->trace_buffer.buffer)
b382ede6
SR
4237 return 0;
4238
12883efb 4239 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4240 if (ret < 0)
4241 return ret;
4242
12883efb 4243#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4244 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4245 !tr->current_trace->use_max_tr)
ef710e10
KM
4246 goto out;
4247
12883efb 4248 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4249 if (ret < 0) {
12883efb
SRRH
4250 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4251 &tr->trace_buffer, cpu);
73c5162a 4252 if (r < 0) {
a123c52b
SR
4253 /*
4254 * AARGH! We are left with different
4255 * size max buffer!!!!
4256 * The max buffer is our "snapshot" buffer.
4257 * When a tracer needs a snapshot (one of the
4258 * latency tracers), it swaps the max buffer
 4259 * with the saved snapshot. We succeeded in
 4260 * updating the size of the main buffer, but failed to
4261 * update the size of the max buffer. But when we tried
4262 * to reset the main buffer to the original size, we
4263 * failed there too. This is very unlikely to
4264 * happen, but if it does, warn and kill all
4265 * tracing.
4266 */
73c5162a
SR
4267 WARN_ON(1);
4268 tracing_disabled = 1;
4269 }
4270 return ret;
4271 }
4272
438ced17 4273 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4274 set_buffer_entries(&tr->max_buffer, size);
438ced17 4275 else
12883efb 4276 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4277
ef710e10 4278 out:
12883efb
SRRH
4279#endif /* CONFIG_TRACER_MAX_TRACE */
4280
438ced17 4281 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4282 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4283 else
12883efb 4284 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4285
4286 return ret;
4287}
4288
2b6080f2
SR
4289static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4290 unsigned long size, int cpu_id)
4f271a2a 4291{
83f40318 4292 int ret = size;
4f271a2a
VN
4293
4294 mutex_lock(&trace_types_lock);
4295
438ced17
VN
4296 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4297 /* make sure, this cpu is enabled in the mask */
4298 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4299 ret = -EINVAL;
4300 goto out;
4301 }
4302 }
4f271a2a 4303
2b6080f2 4304 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4305 if (ret < 0)
4306 ret = -ENOMEM;
4307
438ced17 4308out:
4f271a2a
VN
4309 mutex_unlock(&trace_types_lock);
4310
4311 return ret;
4312}
4313
ef710e10 4314
1852fcce
SR
4315/**
4316 * tracing_update_buffers - used by tracing facility to expand ring buffers
4317 *
 4318 * To save memory when tracing is never used on a system with it
 4319 * configured in, the ring buffers are set to a minimum size. But once
 4320 * a user starts to use the tracing facility, they need to grow
4321 * to their default size.
4322 *
4323 * This function is to be called when a tracer is about to be used.
4324 */
4325int tracing_update_buffers(void)
4326{
4327 int ret = 0;
4328
1027fcb2 4329 mutex_lock(&trace_types_lock);
1852fcce 4330 if (!ring_buffer_expanded)
2b6080f2 4331 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4332 RING_BUFFER_ALL_CPUS);
1027fcb2 4333 mutex_unlock(&trace_types_lock);
1852fcce
SR
4334
4335 return ret;
4336}
4337
577b785f
SR
4338struct trace_option_dentry;
4339
37aea98b 4340static void
2b6080f2 4341create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4342
6b450d25
SRRH
4343/*
4344 * Used to clear out the tracer before deletion of an instance.
4345 * Must have trace_types_lock held.
4346 */
4347static void tracing_set_nop(struct trace_array *tr)
4348{
4349 if (tr->current_trace == &nop_trace)
4350 return;
4351
50512ab5 4352 tr->current_trace->enabled--;
6b450d25
SRRH
4353
4354 if (tr->current_trace->reset)
4355 tr->current_trace->reset(tr);
4356
4357 tr->current_trace = &nop_trace;
4358}
4359
41d9c0be 4360static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4361{
09d23a1d
SRRH
4362 /* Only enable if the directory has been created already. */
4363 if (!tr->dir)
4364 return;
4365
37aea98b 4366 create_trace_option_files(tr, t);
09d23a1d
SRRH
4367}
4368
4369static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4370{
bc0c38d1 4371 struct tracer *t;
12883efb 4372#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4373 bool had_max_tr;
12883efb 4374#endif
d9e54076 4375 int ret = 0;
bc0c38d1 4376
1027fcb2
SR
4377 mutex_lock(&trace_types_lock);
4378
73c5162a 4379 if (!ring_buffer_expanded) {
2b6080f2 4380 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4381 RING_BUFFER_ALL_CPUS);
73c5162a 4382 if (ret < 0)
59f586db 4383 goto out;
73c5162a
SR
4384 ret = 0;
4385 }
4386
bc0c38d1
SR
4387 for (t = trace_types; t; t = t->next) {
4388 if (strcmp(t->name, buf) == 0)
4389 break;
4390 }
c2931e05
FW
4391 if (!t) {
4392 ret = -EINVAL;
4393 goto out;
4394 }
2b6080f2 4395 if (t == tr->current_trace)
bc0c38d1
SR
4396 goto out;
4397
607e2ea1
SRRH
4398 /* Some tracers are only allowed for the top level buffer */
4399 if (!trace_ok_for_array(t, tr)) {
4400 ret = -EINVAL;
4401 goto out;
4402 }
4403
cf6ab6d9
SRRH
4404 /* If trace pipe files are being read, we can't change the tracer */
4405 if (tr->current_trace->ref) {
4406 ret = -EBUSY;
4407 goto out;
4408 }
4409
9f029e83 4410 trace_branch_disable();
613f04a0 4411
50512ab5 4412 tr->current_trace->enabled--;
613f04a0 4413
2b6080f2
SR
4414 if (tr->current_trace->reset)
4415 tr->current_trace->reset(tr);
34600f0e 4416
12883efb 4417 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4418 tr->current_trace = &nop_trace;
34600f0e 4419
45ad21ca
SRRH
4420#ifdef CONFIG_TRACER_MAX_TRACE
4421 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4422
4423 if (had_max_tr && !t->use_max_tr) {
4424 /*
4425 * We need to make sure that the update_max_tr sees that
4426 * current_trace changed to nop_trace to keep it from
4427 * swapping the buffers after we resize it.
 4428 * The update_max_tr is called with interrupts disabled,
 4429 * so a synchronize_sched() is sufficient.
4430 */
4431 synchronize_sched();
3209cff4 4432 free_snapshot(tr);
ef710e10 4433 }
12883efb 4434#endif
12883efb
SRRH
4435
4436#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4437 if (t->use_max_tr && !had_max_tr) {
3209cff4 4438 ret = alloc_snapshot(tr);
d60da506
HT
4439 if (ret < 0)
4440 goto out;
ef710e10 4441 }
12883efb 4442#endif
577b785f 4443
1c80025a 4444 if (t->init) {
b6f11df2 4445 ret = tracer_init(t, tr);
1c80025a
FW
4446 if (ret)
4447 goto out;
4448 }
bc0c38d1 4449
2b6080f2 4450 tr->current_trace = t;
50512ab5 4451 tr->current_trace->enabled++;
9f029e83 4452 trace_branch_enable(tr);
bc0c38d1
SR
4453 out:
4454 mutex_unlock(&trace_types_lock);
4455
d9e54076
PZ
4456 return ret;
4457}
4458
4459static ssize_t
4460tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4461 size_t cnt, loff_t *ppos)
4462{
607e2ea1 4463 struct trace_array *tr = filp->private_data;
ee6c2c1b 4464 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4465 int i;
4466 size_t ret;
e6e7a65a
FW
4467 int err;
4468
4469 ret = cnt;
d9e54076 4470
ee6c2c1b
LZ
4471 if (cnt > MAX_TRACER_SIZE)
4472 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4473
4474 if (copy_from_user(&buf, ubuf, cnt))
4475 return -EFAULT;
4476
4477 buf[cnt] = 0;
4478
4479 /* strip ending whitespace. */
4480 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4481 buf[i] = 0;
4482
607e2ea1 4483 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4484 if (err)
4485 return err;
d9e54076 4486
cf8517cf 4487 *ppos += ret;
bc0c38d1 4488
c2931e05 4489 return ret;
bc0c38d1
SR
4490}
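/*
 * Example: tracing_set_trace_write() is reached by writing a tracer name
 * to current_tracer; the accepted names are listed in available_tracers:
 *
 *   # cat /sys/kernel/debug/tracing/available_tracers
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # echo nop > /sys/kernel/debug/tracing/current_tracer
 */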
4491
4492static ssize_t
6508fa76
SF
4493tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4494 size_t cnt, loff_t *ppos)
bc0c38d1 4495{
bc0c38d1
SR
4496 char buf[64];
4497 int r;
4498
cffae437 4499 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4500 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4501 if (r > sizeof(buf))
4502 r = sizeof(buf);
4bf39a94 4503 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4504}
4505
4506static ssize_t
6508fa76
SF
4507tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4508 size_t cnt, loff_t *ppos)
bc0c38d1 4509{
5e39841c 4510 unsigned long val;
c6caeeb1 4511 int ret;
bc0c38d1 4512
22fe9b54
PH
4513 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4514 if (ret)
c6caeeb1 4515 return ret;
bc0c38d1
SR
4516
4517 *ptr = val * 1000;
4518
4519 return cnt;
4520}
4521
6508fa76
SF
4522static ssize_t
4523tracing_thresh_read(struct file *filp, char __user *ubuf,
4524 size_t cnt, loff_t *ppos)
4525{
4526 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4527}
4528
4529static ssize_t
4530tracing_thresh_write(struct file *filp, const char __user *ubuf,
4531 size_t cnt, loff_t *ppos)
4532{
4533 struct trace_array *tr = filp->private_data;
4534 int ret;
4535
4536 mutex_lock(&trace_types_lock);
4537 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4538 if (ret < 0)
4539 goto out;
4540
4541 if (tr->current_trace->update_thresh) {
4542 ret = tr->current_trace->update_thresh(tr);
4543 if (ret < 0)
4544 goto out;
4545 }
4546
4547 ret = cnt;
4548out:
4549 mutex_unlock(&trace_types_lock);
4550
4551 return ret;
4552}
4553
e428abbb
CG
4554#ifdef CONFIG_TRACER_MAX_TRACE
4555
6508fa76
SF
4556static ssize_t
4557tracing_max_lat_read(struct file *filp, char __user *ubuf,
4558 size_t cnt, loff_t *ppos)
4559{
4560 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4561}
4562
4563static ssize_t
4564tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4565 size_t cnt, loff_t *ppos)
4566{
4567 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4568}
4569
e428abbb
CG
4570#endif
4571
b3806b43
SR
4572static int tracing_open_pipe(struct inode *inode, struct file *filp)
4573{
15544209 4574 struct trace_array *tr = inode->i_private;
b3806b43 4575 struct trace_iterator *iter;
b04cc6b1 4576 int ret = 0;
b3806b43
SR
4577
4578 if (tracing_disabled)
4579 return -ENODEV;
4580
7b85af63
SRRH
4581 if (trace_array_get(tr) < 0)
4582 return -ENODEV;
4583
b04cc6b1
FW
4584 mutex_lock(&trace_types_lock);
4585
b3806b43
SR
4586 /* create a buffer to store the information to pass to userspace */
4587 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4588 if (!iter) {
4589 ret = -ENOMEM;
f77d09a3 4590 __trace_array_put(tr);
b04cc6b1
FW
4591 goto out;
4592 }
b3806b43 4593
3a161d99 4594 trace_seq_init(&iter->seq);
d716ff71 4595 iter->trace = tr->current_trace;
d7350c3f 4596
4462344e 4597 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4598 ret = -ENOMEM;
d7350c3f 4599 goto fail;
4462344e
RR
4600 }
4601
a309720c 4602 /* trace pipe does not show start of buffer */
4462344e 4603 cpumask_setall(iter->started);
a309720c 4604
983f938a 4605 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
4606 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4607
8be0709f 4608 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4609 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4610 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4611
15544209
ON
4612 iter->tr = tr;
4613 iter->trace_buffer = &tr->trace_buffer;
4614 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4615 mutex_init(&iter->mutex);
b3806b43
SR
4616 filp->private_data = iter;
4617
107bad8b
SR
4618 if (iter->trace->pipe_open)
4619 iter->trace->pipe_open(iter);
107bad8b 4620
b444786f 4621 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4622
4623 tr->current_trace->ref++;
b04cc6b1
FW
4624out:
4625 mutex_unlock(&trace_types_lock);
4626 return ret;
d7350c3f
FW
4627
4628fail:
4629 kfree(iter->trace);
4630 kfree(iter);
7b85af63 4631 __trace_array_put(tr);
d7350c3f
FW
4632 mutex_unlock(&trace_types_lock);
4633 return ret;
b3806b43
SR
4634}
4635
4636static int tracing_release_pipe(struct inode *inode, struct file *file)
4637{
4638 struct trace_iterator *iter = file->private_data;
15544209 4639 struct trace_array *tr = inode->i_private;
b3806b43 4640
b04cc6b1
FW
4641 mutex_lock(&trace_types_lock);
4642
cf6ab6d9
SRRH
4643 tr->current_trace->ref--;
4644
29bf4a5e 4645 if (iter->trace->pipe_close)
c521efd1
SR
4646 iter->trace->pipe_close(iter);
4647
b04cc6b1
FW
4648 mutex_unlock(&trace_types_lock);
4649
4462344e 4650 free_cpumask_var(iter->started);
d7350c3f 4651 mutex_destroy(&iter->mutex);
b3806b43 4652 kfree(iter);
b3806b43 4653
7b85af63
SRRH
4654 trace_array_put(tr);
4655
b3806b43
SR
4656 return 0;
4657}
4658
2a2cc8f7 4659static unsigned int
cc60cdc9 4660trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4661{
983f938a
SRRH
4662 struct trace_array *tr = iter->tr;
4663
15693458
SRRH
4664 /* Iterators are static, they should be filled or empty */
4665 if (trace_buffer_iter(iter, iter->cpu_file))
4666 return POLLIN | POLLRDNORM;
2a2cc8f7 4667
983f938a 4668 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4669 /*
4670 * Always select as readable when in blocking mode
4671 */
4672 return POLLIN | POLLRDNORM;
15693458 4673 else
12883efb 4674 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4675 filp, poll_table);
2a2cc8f7 4676}
2a2cc8f7 4677
cc60cdc9
SR
4678static unsigned int
4679tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4680{
4681 struct trace_iterator *iter = filp->private_data;
4682
4683 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4684}
4685
d716ff71 4686/* Must be called with iter->mutex held. */
ff98781b 4687static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4688{
4689 struct trace_iterator *iter = filp->private_data;
8b8b3683 4690 int ret;
b3806b43 4691
b3806b43 4692 while (trace_empty(iter)) {
2dc8f095 4693
107bad8b 4694 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4695 return -EAGAIN;
107bad8b 4696 }
2dc8f095 4697
b3806b43 4698 /*
250bfd3d 4699 * We block until we read something and tracing is disabled.
b3806b43
SR
4700 * We still block if tracing is disabled, but we have never
4701 * read anything. This allows a user to cat this file, and
4702 * then enable tracing. But after we have read something,
4703 * we give an EOF when tracing is again disabled.
4704 *
4705 * iter->pos will be 0 if we haven't read anything.
4706 */
10246fa3 4707 if (!tracing_is_on() && iter->pos)
b3806b43 4708 break;
f4874261
SRRH
4709
4710 mutex_unlock(&iter->mutex);
4711
e30f53aa 4712 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4713
4714 mutex_lock(&iter->mutex);
4715
8b8b3683
SRRH
4716 if (ret)
4717 return ret;
b3806b43
SR
4718 }
4719
ff98781b
EGM
4720 return 1;
4721}
4722
4723/*
4724 * Consumer reader.
4725 */
4726static ssize_t
4727tracing_read_pipe(struct file *filp, char __user *ubuf,
4728 size_t cnt, loff_t *ppos)
4729{
4730 struct trace_iterator *iter = filp->private_data;
4731 ssize_t sret;
4732
4733 /* return any leftover data */
4734 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4735 if (sret != -EBUSY)
4736 return sret;
4737
f9520750 4738 trace_seq_init(&iter->seq);
ff98781b 4739
d7350c3f
FW
4740 /*
4741 * Avoid more than one consumer on a single file descriptor
4742 * This is just a matter of traces coherency, the ring buffer itself
4743 * is protected.
4744 */
4745 mutex_lock(&iter->mutex);
ff98781b
EGM
4746 if (iter->trace->read) {
4747 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4748 if (sret)
4749 goto out;
4750 }
4751
4752waitagain:
4753 sret = tracing_wait_pipe(filp);
4754 if (sret <= 0)
4755 goto out;
4756
b3806b43 4757 /* stop when tracing is finished */
ff98781b
EGM
4758 if (trace_empty(iter)) {
4759 sret = 0;
107bad8b 4760 goto out;
ff98781b 4761 }
b3806b43
SR
4762
4763 if (cnt >= PAGE_SIZE)
4764 cnt = PAGE_SIZE - 1;
4765
53d0aa77 4766 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4767 memset(&iter->seq, 0,
4768 sizeof(struct trace_iterator) -
4769 offsetof(struct trace_iterator, seq));
ed5467da 4770 cpumask_clear(iter->started);
4823ed7e 4771 iter->pos = -1;
b3806b43 4772
4f535968 4773 trace_event_read_lock();
7e53bd42 4774 trace_access_lock(iter->cpu_file);
955b61e5 4775 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4776 enum print_line_t ret;
5ac48378 4777 int save_len = iter->seq.seq.len;
088b1e42 4778
f9896bf3 4779 ret = print_trace_line(iter);
2c4f035f 4780 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4781 /* don't print partial lines */
5ac48378 4782 iter->seq.seq.len = save_len;
b3806b43 4783 break;
088b1e42 4784 }
b91facc3
FW
4785 if (ret != TRACE_TYPE_NO_CONSUME)
4786 trace_consume(iter);
b3806b43 4787
5ac48378 4788 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4789 break;
ee5e51f5
JO
4790
4791 /*
4792 * Setting the full flag means we reached the trace_seq buffer
4793 * size and we should leave by partial output condition above.
4794 * One of the trace_seq_* functions is not used properly.
4795 */
4796 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4797 iter->ent->type);
b3806b43 4798 }
7e53bd42 4799 trace_access_unlock(iter->cpu_file);
4f535968 4800 trace_event_read_unlock();
b3806b43 4801
b3806b43 4802 /* Now copy what we have to the user */
6c6c2796 4803 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4804 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4805 trace_seq_init(&iter->seq);
9ff4b974
PP
4806
4807 /*
25985edc 4808 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4809 * entries, go back to wait for more entries.
4810 */
6c6c2796 4811 if (sret == -EBUSY)
9ff4b974 4812 goto waitagain;
b3806b43 4813
107bad8b 4814out:
d7350c3f 4815 mutex_unlock(&iter->mutex);
107bad8b 4816
6c6c2796 4817 return sret;
b3806b43
SR
4818}
4819
3c56819b
EGM
4820static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4821 unsigned int idx)
4822{
4823 __free_page(spd->pages[idx]);
4824}
4825
28dfef8f 4826static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4827 .can_merge = 0,
34cd4998 4828 .confirm = generic_pipe_buf_confirm,
92fdd98c 4829 .release = generic_pipe_buf_release,
34cd4998
SR
4830 .steal = generic_pipe_buf_steal,
4831 .get = generic_pipe_buf_get,
3c56819b
EGM
4832};
4833
34cd4998 4834static size_t
fa7c7f6e 4835tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4836{
4837 size_t count;
74f06bb7 4838 int save_len;
34cd4998
SR
4839 int ret;
4840
4841 /* Seq buffer is page-sized, exactly what we need. */
4842 for (;;) {
74f06bb7 4843 save_len = iter->seq.seq.len;
34cd4998 4844 ret = print_trace_line(iter);
74f06bb7
SRRH
4845
4846 if (trace_seq_has_overflowed(&iter->seq)) {
4847 iter->seq.seq.len = save_len;
34cd4998
SR
4848 break;
4849 }
74f06bb7
SRRH
4850
4851 /*
4852 * This should not be hit, because it should only
4853 * be set if the iter->seq overflowed. But check it
4854 * anyway to be safe.
4855 */
34cd4998 4856 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4857 iter->seq.seq.len = save_len;
4858 break;
4859 }
4860
5ac48378 4861 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4862 if (rem < count) {
4863 rem = 0;
4864 iter->seq.seq.len = save_len;
34cd4998
SR
4865 break;
4866 }
4867
74e7ff8c
LJ
4868 if (ret != TRACE_TYPE_NO_CONSUME)
4869 trace_consume(iter);
34cd4998 4870 rem -= count;
955b61e5 4871 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4872 rem = 0;
4873 iter->ent = NULL;
4874 break;
4875 }
4876 }
4877
4878 return rem;
4879}
4880
3c56819b
EGM
4881static ssize_t tracing_splice_read_pipe(struct file *filp,
4882 loff_t *ppos,
4883 struct pipe_inode_info *pipe,
4884 size_t len,
4885 unsigned int flags)
4886{
35f3d14d
JA
4887 struct page *pages_def[PIPE_DEF_BUFFERS];
4888 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4889 struct trace_iterator *iter = filp->private_data;
4890 struct splice_pipe_desc spd = {
35f3d14d
JA
4891 .pages = pages_def,
4892 .partial = partial_def,
34cd4998 4893 .nr_pages = 0, /* This gets updated below. */
047fe360 4894 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4895 .flags = flags,
4896 .ops = &tracing_pipe_buf_ops,
4897 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4898 };
4899 ssize_t ret;
34cd4998 4900 size_t rem;
3c56819b
EGM
4901 unsigned int i;
4902
35f3d14d
JA
4903 if (splice_grow_spd(pipe, &spd))
4904 return -ENOMEM;
4905
d7350c3f 4906 mutex_lock(&iter->mutex);
3c56819b
EGM
4907
4908 if (iter->trace->splice_read) {
4909 ret = iter->trace->splice_read(iter, filp,
4910 ppos, pipe, len, flags);
4911 if (ret)
34cd4998 4912 goto out_err;
3c56819b
EGM
4913 }
4914
4915 ret = tracing_wait_pipe(filp);
4916 if (ret <= 0)
34cd4998 4917 goto out_err;
3c56819b 4918
955b61e5 4919 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4920 ret = -EFAULT;
34cd4998 4921 goto out_err;
3c56819b
EGM
4922 }
4923
4f535968 4924 trace_event_read_lock();
7e53bd42 4925 trace_access_lock(iter->cpu_file);
4f535968 4926
3c56819b 4927 /* Fill as many pages as possible. */
a786c06d 4928 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4929 spd.pages[i] = alloc_page(GFP_KERNEL);
4930 if (!spd.pages[i])
34cd4998 4931 break;
3c56819b 4932
fa7c7f6e 4933 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4934
4935 /* Copy the data into the page, so we can start over. */
4936 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4937 page_address(spd.pages[i]),
5ac48378 4938 trace_seq_used(&iter->seq));
3c56819b 4939 if (ret < 0) {
35f3d14d 4940 __free_page(spd.pages[i]);
3c56819b
EGM
4941 break;
4942 }
35f3d14d 4943 spd.partial[i].offset = 0;
5ac48378 4944 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4945
f9520750 4946 trace_seq_init(&iter->seq);
3c56819b
EGM
4947 }
4948
7e53bd42 4949 trace_access_unlock(iter->cpu_file);
4f535968 4950 trace_event_read_unlock();
d7350c3f 4951 mutex_unlock(&iter->mutex);
3c56819b
EGM
4952
4953 spd.nr_pages = i;
4954
35f3d14d
JA
4955 ret = splice_to_pipe(pipe, &spd);
4956out:
047fe360 4957 splice_shrink_spd(&spd);
35f3d14d 4958 return ret;
3c56819b 4959
34cd4998 4960out_err:
d7350c3f 4961 mutex_unlock(&iter->mutex);
35f3d14d 4962 goto out;
3c56819b
EGM
4963}
4964
a98a3c3f
SR
4965static ssize_t
4966tracing_entries_read(struct file *filp, char __user *ubuf,
4967 size_t cnt, loff_t *ppos)
4968{
0bc392ee
ON
4969 struct inode *inode = file_inode(filp);
4970 struct trace_array *tr = inode->i_private;
4971 int cpu = tracing_get_cpu(inode);
438ced17
VN
4972 char buf[64];
4973 int r = 0;
4974 ssize_t ret;
a98a3c3f 4975
db526ca3 4976 mutex_lock(&trace_types_lock);
438ced17 4977
0bc392ee 4978 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4979 int cpu, buf_size_same;
4980 unsigned long size;
4981
4982 size = 0;
4983 buf_size_same = 1;
4984 /* check if all cpu sizes are same */
4985 for_each_tracing_cpu(cpu) {
4986 /* fill in the size from first enabled cpu */
4987 if (size == 0)
12883efb
SRRH
4988 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4989 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4990 buf_size_same = 0;
4991 break;
4992 }
4993 }
4994
4995 if (buf_size_same) {
4996 if (!ring_buffer_expanded)
4997 r = sprintf(buf, "%lu (expanded: %lu)\n",
4998 size >> 10,
4999 trace_buf_size >> 10);
5000 else
5001 r = sprintf(buf, "%lu\n", size >> 10);
5002 } else
5003 r = sprintf(buf, "X\n");
5004 } else
0bc392ee 5005 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5006
db526ca3
SR
5007 mutex_unlock(&trace_types_lock);
5008
438ced17
VN
5009 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5010 return ret;
a98a3c3f
SR
5011}
5012
5013static ssize_t
5014tracing_entries_write(struct file *filp, const char __user *ubuf,
5015 size_t cnt, loff_t *ppos)
5016{
0bc392ee
ON
5017 struct inode *inode = file_inode(filp);
5018 struct trace_array *tr = inode->i_private;
a98a3c3f 5019 unsigned long val;
4f271a2a 5020 int ret;
a98a3c3f 5021
22fe9b54
PH
5022 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5023 if (ret)
c6caeeb1 5024 return ret;
a98a3c3f
SR
5025
5026 /* must have at least 1 entry */
5027 if (!val)
5028 return -EINVAL;
5029
1696b2b0
SR
5030 /* value is in KB */
5031 val <<= 10;
0bc392ee 5032 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5033 if (ret < 0)
5034 return ret;
a98a3c3f 5035
cf8517cf 5036 *ppos += cnt;
a98a3c3f 5037
4f271a2a
VN
5038 return cnt;
5039}
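/*
 * Example: tracing_entries_write() sits behind buffer_size_kb (value in
 * KB, per CPU). The top level file resizes every CPU buffer, while the
 * per_cpu/cpuN/buffer_size_kb files resize a single CPU:
 *
 *   # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *   # echo 1408 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */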
bf5e6519 5040
f81ab074
VN
5041static ssize_t
5042tracing_total_entries_read(struct file *filp, char __user *ubuf,
5043 size_t cnt, loff_t *ppos)
5044{
5045 struct trace_array *tr = filp->private_data;
5046 char buf[64];
5047 int r, cpu;
5048 unsigned long size = 0, expanded_size = 0;
5049
5050 mutex_lock(&trace_types_lock);
5051 for_each_tracing_cpu(cpu) {
12883efb 5052 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5053 if (!ring_buffer_expanded)
5054 expanded_size += trace_buf_size >> 10;
5055 }
5056 if (ring_buffer_expanded)
5057 r = sprintf(buf, "%lu\n", size);
5058 else
5059 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5060 mutex_unlock(&trace_types_lock);
5061
5062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5063}
5064
4f271a2a
VN
5065static ssize_t
5066tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5067 size_t cnt, loff_t *ppos)
5068{
5069 /*
5070 * There is no need to read what the user has written, this function
5071 * is just to make sure that there is no error when "echo" is used
5072 */
5073
5074 *ppos += cnt;
a98a3c3f
SR
5075
5076 return cnt;
5077}
5078
4f271a2a
VN
5079static int
5080tracing_free_buffer_release(struct inode *inode, struct file *filp)
5081{
2b6080f2
SR
5082 struct trace_array *tr = inode->i_private;
5083
cf30cf67 5084 /* disable tracing ? */
983f938a 5085 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5086 tracer_tracing_off(tr);
4f271a2a 5087 /* resize the ring buffer to 0 */
2b6080f2 5088 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5089
7b85af63
SRRH
5090 trace_array_put(tr);
5091
4f271a2a
VN
5092 return 0;
5093}
5094
5bf9a1ee
PP
5095static ssize_t
5096tracing_mark_write(struct file *filp, const char __user *ubuf,
5097 size_t cnt, loff_t *fpos)
5098{
d696b58c 5099 unsigned long addr = (unsigned long)ubuf;
2d71619c 5100 struct trace_array *tr = filp->private_data;
d696b58c
SR
5101 struct ring_buffer_event *event;
5102 struct ring_buffer *buffer;
5103 struct print_entry *entry;
5104 unsigned long irq_flags;
5105 struct page *pages[2];
6edb2a8a 5106 void *map_page[2];
d696b58c
SR
5107 int nr_pages = 1;
5108 ssize_t written;
d696b58c
SR
5109 int offset;
5110 int size;
5111 int len;
5112 int ret;
6edb2a8a 5113 int i;
5bf9a1ee 5114
c76f0694 5115 if (tracing_disabled)
5bf9a1ee
PP
5116 return -EINVAL;
5117
983f938a 5118 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5119 return -EINVAL;
5120
5bf9a1ee
PP
5121 if (cnt > TRACE_BUF_SIZE)
5122 cnt = TRACE_BUF_SIZE;
5123
d696b58c
SR
5124 /*
5125 * Userspace is injecting traces into the kernel trace buffer.
 5126 * We want to be as non-intrusive as possible.
5127 * To do so, we do not want to allocate any special buffers
5128 * or take any locks, but instead write the userspace data
5129 * straight into the ring buffer.
5130 *
5131 * First we need to pin the userspace buffer into memory,
 5132 * which it most likely already is, because userspace just referenced it.
5133 * But there's no guarantee that it is. By using get_user_pages_fast()
5134 * and kmap_atomic/kunmap_atomic() we can get access to the
5135 * pages directly. We then write the data directly into the
5136 * ring buffer.
5137 */
5138 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5139
d696b58c
SR
5140 /* check if we cross pages */
5141 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5142 nr_pages = 2;
5143
5144 offset = addr & (PAGE_SIZE - 1);
5145 addr &= PAGE_MASK;
5146
5147 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5148 if (ret < nr_pages) {
5149 while (--ret >= 0)
5150 put_page(pages[ret]);
5151 written = -EFAULT;
5152 goto out;
5bf9a1ee 5153 }
d696b58c 5154
6edb2a8a
SR
5155 for (i = 0; i < nr_pages; i++)
5156 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5157
5158 local_save_flags(irq_flags);
5159 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5160 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5161 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5162 irq_flags, preempt_count());
5163 if (!event) {
5164 /* Ring buffer disabled, return as if not open for write */
5165 written = -EBADF;
5166 goto out_unlock;
5bf9a1ee 5167 }
d696b58c
SR
5168
5169 entry = ring_buffer_event_data(event);
5170 entry->ip = _THIS_IP_;
5171
5172 if (nr_pages == 2) {
5173 len = PAGE_SIZE - offset;
6edb2a8a
SR
5174 memcpy(&entry->buf, map_page[0] + offset, len);
5175 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5176 } else
6edb2a8a 5177 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5178
d696b58c
SR
5179 if (entry->buf[cnt - 1] != '\n') {
5180 entry->buf[cnt] = '\n';
5181 entry->buf[cnt + 1] = '\0';
5182 } else
5183 entry->buf[cnt] = '\0';
5184
7ffbd48d 5185 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5186
d696b58c 5187 written = cnt;
5bf9a1ee 5188
d696b58c 5189 *fpos += written;
1aa54bca 5190
d696b58c 5191 out_unlock:
7215853e 5192 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5193 kunmap_atomic(map_page[i]);
5194 put_page(pages[i]);
5195 }
d696b58c 5196 out:
1aa54bca 5197 return written;
5bf9a1ee
PP
5198}
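/*
 * Example: tracing_mark_write() backs the trace_marker file, which lets
 * userspace drop annotations straight into the ring buffer:
 *
 *   # echo "starting benchmark run 3" > /sys/kernel/debug/tracing/trace_marker
 *
 * The string appears in the trace output as a print event attributed to
 * the writing task.
 */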
5199
13f16d20 5200static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5201{
2b6080f2 5202 struct trace_array *tr = m->private;
5079f326
Z
5203 int i;
5204
5205 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5206 seq_printf(m,
5079f326 5207 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5208 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5209 i == tr->clock_id ? "]" : "");
13f16d20 5210 seq_putc(m, '\n');
5079f326 5211
13f16d20 5212 return 0;
5079f326
Z
5213}
5214
e1e232ca 5215static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5216{
5079f326
Z
5217 int i;
5218
5079f326
Z
5219 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5220 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5221 break;
5222 }
5223 if (i == ARRAY_SIZE(trace_clocks))
5224 return -EINVAL;
5225
5079f326
Z
5226 mutex_lock(&trace_types_lock);
5227
2b6080f2
SR
5228 tr->clock_id = i;
5229
12883efb 5230 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5231
60303ed3
DS
5232 /*
5233 * New clock may not be consistent with the previous clock.
5234 * Reset the buffer so that it doesn't have incomparable timestamps.
5235 */
9457158b 5236 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5237
5238#ifdef CONFIG_TRACER_MAX_TRACE
5239 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5240 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5241 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5242#endif
60303ed3 5243
5079f326
Z
5244 mutex_unlock(&trace_types_lock);
5245
e1e232ca
SR
5246 return 0;
5247}
5248
5249static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5250 size_t cnt, loff_t *fpos)
5251{
5252 struct seq_file *m = filp->private_data;
5253 struct trace_array *tr = m->private;
5254 char buf[64];
5255 const char *clockstr;
5256 int ret;
5257
5258 if (cnt >= sizeof(buf))
5259 return -EINVAL;
5260
5261 if (copy_from_user(&buf, ubuf, cnt))
5262 return -EFAULT;
5263
5264 buf[cnt] = 0;
5265
5266 clockstr = strstrip(buf);
5267
5268 ret = tracing_set_clock(tr, clockstr);
5269 if (ret)
5270 return ret;
5271
5079f326
Z
5272 *fpos += cnt;
5273
5274 return cnt;
5275}
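/*
 * Example: tracing_clock_write() handles writes to trace_clock. Reading
 * the file shows the available clocks with the current one in brackets
 * (the exact list depends on the architecture and config):
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Note that changing the clock resets the buffers, since old and new
 * timestamps are not comparable.
 */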
5276
13f16d20
LZ
5277static int tracing_clock_open(struct inode *inode, struct file *file)
5278{
7b85af63
SRRH
5279 struct trace_array *tr = inode->i_private;
5280 int ret;
5281
13f16d20
LZ
5282 if (tracing_disabled)
5283 return -ENODEV;
2b6080f2 5284
7b85af63
SRRH
5285 if (trace_array_get(tr))
5286 return -ENODEV;
5287
5288 ret = single_open(file, tracing_clock_show, inode->i_private);
5289 if (ret < 0)
5290 trace_array_put(tr);
5291
5292 return ret;
13f16d20
LZ
5293}
5294
6de58e62
SRRH
5295struct ftrace_buffer_info {
5296 struct trace_iterator iter;
5297 void *spare;
5298 unsigned int read;
5299};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
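/*
 * Illustrative sketch, not part of trace.c: the write handler above gives
 * the "snapshot" file its documented semantics. Writing 0 frees the spare
 * (max) buffer, writing 1 allocates it if needed and swaps it with the live
 * buffer, and any other value clears the snapshot contents. The tracefs
 * path below is an assumption (commonly /sys/kernel/debug/tracing).
 */
#if 0	/* user-space example, never compiled as part of the kernel */
#include <fcntl.h>
#include <unistd.h>

/* cmd is "0", "1" or e.g. "2"; returns 0 on success, -1 on error */
static int snapshot_ctl(const char *cmd)
{
	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
	int ret = -1;

	if (fd >= 0) {
		ret = (write(fd, cmd, 1) == 1) ? 0 : -1;
		close(fd);
	}
	return ret;
}

/* Example: snapshot_ctl("1") captures the live trace into the spare buffer. */
#endif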

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}
5602
5603static ssize_t
5604tracing_buffers_read(struct file *filp, char __user *ubuf,
5605 size_t count, loff_t *ppos)
5606{
5607 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5608 struct trace_iterator *iter = &info->iter;
2cadf913 5609 ssize_t ret;
6de58e62 5610 ssize_t size;
2cadf913 5611
2dc5d12b
SR
5612 if (!count)
5613 return 0;
5614
6de58e62 5615#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5616 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5617 return -EBUSY;
6de58e62
SRRH
5618#endif
5619
ddd538f3 5620 if (!info->spare)
12883efb
SRRH
5621 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5622 iter->cpu_file);
ddd538f3 5623 if (!info->spare)
d716ff71 5624 return -ENOMEM;
ddd538f3 5625
5626 /* Do we have previous read data to read? */
5627 if (info->read < PAGE_SIZE)
5628 goto read;
5629
b627344f 5630 again:
cc60cdc9 5631 trace_access_lock(iter->cpu_file);
12883efb 5632 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5633 &info->spare,
5634 count,
cc60cdc9
SR
5635 iter->cpu_file, 0);
5636 trace_access_unlock(iter->cpu_file);
2cadf913 5637
b627344f
SR
5638 if (ret < 0) {
5639 if (trace_empty(iter)) {
d716ff71
SRRH
5640 if ((filp->f_flags & O_NONBLOCK))
5641 return -EAGAIN;
5642
e30f53aa 5643 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5644 if (ret)
5645 return ret;
5646
b627344f
SR
5647 goto again;
5648 }
d716ff71 5649 return 0;
b627344f 5650 }
436fc280 5651
436fc280 5652 info->read = 0;
b627344f 5653 read:
5654 size = PAGE_SIZE - info->read;
5655 if (size > count)
5656 size = count;
5657
5658 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5659 if (ret == size)
5660 return -EFAULT;
5661
2dc5d12b
SR
5662 size -= ret;
5663
2cadf913
SR
5664 *ppos += size;
5665 info->read += size;
5666
5667 return size;
5668}
5669
5670static int tracing_buffers_release(struct inode *inode, struct file *file)
5671{
5672 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5673 struct trace_iterator *iter = &info->iter;
2cadf913 5674
5675 mutex_lock(&trace_types_lock);
5676
cf6ab6d9
SRRH
5677 iter->tr->current_trace->ref--;
5678
ff451961 5679 __trace_array_put(iter->tr);
2cadf913 5680
ddd538f3 5681 if (info->spare)
12883efb 5682 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5683 kfree(info);
5684
a695cb58
SRRH
5685 mutex_unlock(&trace_types_lock);
5686
2cadf913
SR
5687 return 0;
5688}
5689
5690struct buffer_ref {
5691 struct ring_buffer *buffer;
5692 void *page;
5693 int ref;
5694};
5695
5696static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5697 struct pipe_buffer *buf)
5698{
5699 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5700
5701 if (--ref->ref)
5702 return;
5703
5704 ring_buffer_free_read_page(ref->buffer, ref->page);
5705 kfree(ref);
5706 buf->private = 0;
5707}
5708
5709static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5710 struct pipe_buffer *buf)
5711{
5712 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5713
5714 ref->ref++;
5715}
5716
5717/* Pipe buffer operations for a buffer. */
28dfef8f 5718static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5719 .can_merge = 0,
2cadf913
SR
5720 .confirm = generic_pipe_buf_confirm,
5721 .release = buffer_pipe_buf_release,
d55cb6cf 5722 .steal = generic_pipe_buf_steal,
2cadf913
SR
5723 .get = buffer_pipe_buf_get,
5724};
5725
5726/*
5727 * Callback from splice_to_pipe(), if we need to release some pages
5728 * at the end of the spd in case we error'ed out in filling the pipe.
5729 */
5730static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5731{
5732 struct buffer_ref *ref =
5733 (struct buffer_ref *)spd->partial[i].private;
5734
5735 if (--ref->ref)
5736 return;
5737
5738 ring_buffer_free_read_page(ref->buffer, ref->page);
5739 kfree(ref);
5740 spd->partial[i].private = 0;
5741}
5742
5743static ssize_t
5744tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5745 struct pipe_inode_info *pipe, size_t len,
5746 unsigned int flags)
5747{
5748 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5749 struct trace_iterator *iter = &info->iter;
5750 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5751 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5752 struct splice_pipe_desc spd = {
35f3d14d
JA
5753 .pages = pages_def,
5754 .partial = partial_def,
047fe360 5755 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5756 .flags = flags,
5757 .ops = &buffer_pipe_buf_ops,
5758 .spd_release = buffer_spd_release,
5759 };
5760 struct buffer_ref *ref;
93459c6c 5761 int entries, size, i;
07906da7 5762 ssize_t ret = 0;
2cadf913 5763
6de58e62 5764#ifdef CONFIG_TRACER_MAX_TRACE
5765 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5766 return -EBUSY;
6de58e62
SRRH
5767#endif
5768
d716ff71
SRRH
5769 if (splice_grow_spd(pipe, &spd))
5770 return -ENOMEM;
35f3d14d 5771
d716ff71
SRRH
5772 if (*ppos & (PAGE_SIZE - 1))
5773 return -EINVAL;
93cfb3c9
LJ
5774
5775 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5776 if (len < PAGE_SIZE)
5777 return -EINVAL;
93cfb3c9
LJ
5778 len &= PAGE_MASK;
5779 }
5780
cc60cdc9
SR
5781 again:
5782 trace_access_lock(iter->cpu_file);
12883efb 5783 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5784
a786c06d 5785 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5786 struct page *page;
5787 int r;
5788
5789 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5790 if (!ref) {
5791 ret = -ENOMEM;
2cadf913 5792 break;
07906da7 5793 }
2cadf913 5794
7267fa68 5795 ref->ref = 1;
12883efb 5796 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5797 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5798 if (!ref->page) {
07906da7 5799 ret = -ENOMEM;
2cadf913
SR
5800 kfree(ref);
5801 break;
5802 }
5803
5804 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5805 len, iter->cpu_file, 1);
2cadf913 5806 if (r < 0) {
7ea59064 5807 ring_buffer_free_read_page(ref->buffer, ref->page);
5808 kfree(ref);
5809 break;
5810 }
5811
5812 /*
5813 * zero out any left over data, this is going to
5814 * user land.
5815 */
5816 size = ring_buffer_page_len(ref->page);
5817 if (size < PAGE_SIZE)
5818 memset(ref->page + size, 0, PAGE_SIZE - size);
5819
5820 page = virt_to_page(ref->page);
5821
5822 spd.pages[i] = page;
5823 spd.partial[i].len = PAGE_SIZE;
5824 spd.partial[i].offset = 0;
5825 spd.partial[i].private = (unsigned long)ref;
5826 spd.nr_pages++;
93cfb3c9 5827 *ppos += PAGE_SIZE;
93459c6c 5828
12883efb 5829 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5830 }
5831
cc60cdc9 5832 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5833 spd.nr_pages = i;
5834
5835 /* did we read anything? */
5836 if (!spd.nr_pages) {
07906da7 5837 if (ret)
d716ff71
SRRH
5838 return ret;
5839
5840 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5841 return -EAGAIN;
07906da7 5842
e30f53aa 5843 ret = wait_on_pipe(iter, true);
8b8b3683 5844 if (ret)
d716ff71 5845 return ret;
e30f53aa 5846
cc60cdc9 5847 goto again;
2cadf913
SR
5848 }
5849
5850 ret = splice_to_pipe(pipe, &spd);
047fe360 5851 splice_shrink_spd(&spd);
6de58e62 5852
2cadf913
SR
5853 return ret;
5854}
5855
5856static const struct file_operations tracing_buffers_fops = {
5857 .open = tracing_buffers_open,
5858 .read = tracing_buffers_read,
cc60cdc9 5859 .poll = tracing_buffers_poll,
2cadf913
SR
5860 .release = tracing_buffers_release,
5861 .splice_read = tracing_buffers_splice_read,
5862 .llseek = no_llseek,
5863};
5864
5865static ssize_t
5866tracing_stats_read(struct file *filp, char __user *ubuf,
5867 size_t count, loff_t *ppos)
5868{
4d3435b8
ON
5869 struct inode *inode = file_inode(filp);
5870 struct trace_array *tr = inode->i_private;
12883efb 5871 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5872 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5873 struct trace_seq *s;
5874 unsigned long cnt;
c64e148a
VN
5875 unsigned long long t;
5876 unsigned long usec_rem;
c8d77183 5877
e4f2d10f 5878 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5879 if (!s)
a646365c 5880 return -ENOMEM;
c8d77183
SR
5881
5882 trace_seq_init(s);
5883
12883efb 5884 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5885 trace_seq_printf(s, "entries: %ld\n", cnt);
5886
12883efb 5887 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5888 trace_seq_printf(s, "overrun: %ld\n", cnt);
5889
12883efb 5890 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5891 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5892
12883efb 5893 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5894 trace_seq_printf(s, "bytes: %ld\n", cnt);
5895
58e8eedf 5896 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5897 /* local or global for trace_clock */
12883efb 5898 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5899 usec_rem = do_div(t, USEC_PER_SEC);
5900 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5901 t, usec_rem);
5902
12883efb 5903 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5904 usec_rem = do_div(t, USEC_PER_SEC);
5905 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5906 } else {
5907 /* counter or tsc mode for trace_clock */
5908 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5909 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5910
11043d8b 5911 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5912 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5913 }
c64e148a 5914
12883efb 5915 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5916 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5917
12883efb 5918 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5919 trace_seq_printf(s, "read events: %ld\n", cnt);
5920
5ac48378
SRRH
5921 count = simple_read_from_buffer(ubuf, count, ppos,
5922 s->buffer, trace_seq_used(s));
c8d77183
SR
5923
5924 kfree(s);
5925
5926 return count;
5927}
5928
5929static const struct file_operations tracing_stats_fops = {
4d3435b8 5930 .open = tracing_open_generic_tr,
c8d77183 5931 .read = tracing_stats_read,
b444786f 5932 .llseek = generic_file_llseek,
4d3435b8 5933 .release = tracing_release_generic_tr,
c8d77183
SR
5934};
5935
bc0c38d1
SR
5936#ifdef CONFIG_DYNAMIC_FTRACE
5937
5938int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5939{
5940 return 0;
5941}
5942
bc0c38d1 5943static ssize_t
b807c3d0 5944tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5945 size_t cnt, loff_t *ppos)
5946{
a26a2a27
SR
5947 static char ftrace_dyn_info_buffer[1024];
5948 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5949 unsigned long *p = filp->private_data;
b807c3d0 5950 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5951 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5952 int r;
5953
b807c3d0
SR
5954 mutex_lock(&dyn_info_mutex);
5955 r = sprintf(buf, "%ld ", *p);
4bf39a94 5956
a26a2a27 5957 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5958 buf[r++] = '\n';
5959
5960 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5961
5962 mutex_unlock(&dyn_info_mutex);
5963
5964 return r;
bc0c38d1
SR
5965}
5966
5e2336a0 5967static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5968 .open = tracing_open_generic,
b807c3d0 5969 .read = tracing_read_dyn_info,
b444786f 5970 .llseek = generic_file_llseek,
bc0c38d1 5971};
77fd5c15 5972#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5973
77fd5c15
SRRH
5974#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5975static void
5976ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5977{
5978 tracing_snapshot();
5979}
bc0c38d1 5980
5981static void
5982ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5983{
77fd5c15
SRRH
5984 unsigned long *count = (long *)data;
5985
5986 if (!*count)
5987 return;
bc0c38d1 5988
77fd5c15
SRRH
5989 if (*count != -1)
5990 (*count)--;
5991
5992 tracing_snapshot();
5993}
5994
5995static int
5996ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5997 struct ftrace_probe_ops *ops, void *data)
5998{
5999 long count = (long)data;
6000
6001 seq_printf(m, "%ps:", (void *)ip);
6002
fa6f0cc7 6003 seq_puts(m, "snapshot");
77fd5c15
SRRH
6004
6005 if (count == -1)
fa6f0cc7 6006 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
6007 else
6008 seq_printf(m, ":count=%ld\n", count);
6009
6010 return 0;
6011}
6012
6013static struct ftrace_probe_ops snapshot_probe_ops = {
6014 .func = ftrace_snapshot,
6015 .print = ftrace_snapshot_print,
6016};
6017
6018static struct ftrace_probe_ops snapshot_count_probe_ops = {
6019 .func = ftrace_count_snapshot,
6020 .print = ftrace_snapshot_print,
6021};
6022
6023static int
6024ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6025 char *glob, char *cmd, char *param, int enable)
6026{
6027 struct ftrace_probe_ops *ops;
6028 void *count = (void *)-1;
6029 char *number;
6030 int ret;
6031
6032 /* hash funcs only work with set_ftrace_filter */
6033 if (!enable)
6034 return -EINVAL;
6035
6036 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6037
6038 if (glob[0] == '!') {
6039 unregister_ftrace_function_probe_func(glob+1, ops);
6040 return 0;
6041 }
6042
6043 if (!param)
6044 goto out_reg;
6045
6046 number = strsep(&param, ":");
6047
6048 if (!strlen(number))
6049 goto out_reg;
6050
6051 /*
6052 * We use the callback data field (which is a pointer)
6053 * as our counter.
6054 */
6055 ret = kstrtoul(number, 0, (unsigned long *)&count);
6056 if (ret)
6057 return ret;
6058
6059 out_reg:
6060 ret = register_ftrace_function_probe(glob, ops, count);
6061
6062 if (ret >= 0)
6063 alloc_snapshot(&global_trace);
6064
6065 return ret < 0 ? ret : 0;
6066}
6067
6068static struct ftrace_func_command ftrace_snapshot_cmd = {
6069 .name = "snapshot",
6070 .func = ftrace_trace_snapshot_callback,
6071};
6072
38de93ab 6073static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6074{
6075 return register_ftrace_command(&ftrace_snapshot_cmd);
6076}
6077#else
38de93ab 6078static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6079#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6080
7eeafbca 6081static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6082{
6083 if (WARN_ON(!tr->dir))
6084 return ERR_PTR(-ENODEV);
6085
6086 /* Top directory uses NULL as the parent */
6087 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6088 return NULL;
6089
6090 /* All sub buffers have a descriptor */
2b6080f2 6091 return tr->dir;
bc0c38d1
SR
6092}
6093
2b6080f2 6094static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6095{
b04cc6b1
FW
6096 struct dentry *d_tracer;
6097
2b6080f2
SR
6098 if (tr->percpu_dir)
6099 return tr->percpu_dir;
b04cc6b1 6100
7eeafbca 6101 d_tracer = tracing_get_dentry(tr);
14a5ae40 6102 if (IS_ERR(d_tracer))
b04cc6b1
FW
6103 return NULL;
6104
8434dc93 6105 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6106
2b6080f2 6107 WARN_ONCE(!tr->percpu_dir,
8434dc93 6108 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6109
2b6080f2 6110 return tr->percpu_dir;
b04cc6b1
FW
6111}
6112
649e9c70
ON
6113static struct dentry *
6114trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6115 void *data, long cpu, const struct file_operations *fops)
6116{
6117 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6118
6119 if (ret) /* See tracing_get_cpu() */
7682c918 6120 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6121 return ret;
6122}
6123
2b6080f2 6124static void
8434dc93 6125tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6126{
2b6080f2 6127 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6128 struct dentry *d_cpu;
dd49a38c 6129 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6130
0a3d7ce7
NK
6131 if (!d_percpu)
6132 return;
6133
dd49a38c 6134 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6135 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6136 if (!d_cpu) {
8434dc93 6137 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6138 return;
6139 }
b04cc6b1 6140
8656e7a2 6141 /* per cpu trace_pipe */
649e9c70 6142 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6143 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6144
6145 /* per cpu trace */
649e9c70 6146 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6147 tr, cpu, &tracing_fops);
7f96f93f 6148
649e9c70 6149 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6150 tr, cpu, &tracing_buffers_fops);
7f96f93f 6151
649e9c70 6152 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6153 tr, cpu, &tracing_stats_fops);
438ced17 6154
649e9c70 6155 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6156 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6157
6158#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6159 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6160 tr, cpu, &snapshot_fops);
6de58e62 6161
649e9c70 6162 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6163 tr, cpu, &snapshot_raw_fops);
f1affcaa 6164#endif
b04cc6b1
FW
6165}
6166
60a11774
SR
6167#ifdef CONFIG_FTRACE_SELFTEST
6168/* Let selftest have access to static functions in this file */
6169#include "trace_selftest.c"
6170#endif
6171
577b785f
SR
6172static ssize_t
6173trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6174 loff_t *ppos)
6175{
6176 struct trace_option_dentry *topt = filp->private_data;
6177 char *buf;
6178
6179 if (topt->flags->val & topt->opt->bit)
6180 buf = "1\n";
6181 else
6182 buf = "0\n";
6183
6184 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6185}
6186
6187static ssize_t
6188trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6189 loff_t *ppos)
6190{
6191 struct trace_option_dentry *topt = filp->private_data;
6192 unsigned long val;
577b785f
SR
6193 int ret;
6194
22fe9b54
PH
6195 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6196 if (ret)
577b785f
SR
6197 return ret;
6198
8d18eaaf
LZ
6199 if (val != 0 && val != 1)
6200 return -EINVAL;
577b785f 6201
8d18eaaf 6202 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6203 mutex_lock(&trace_types_lock);
8c1a49ae 6204 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6205 topt->opt, !val);
577b785f
SR
6206 mutex_unlock(&trace_types_lock);
6207 if (ret)
6208 return ret;
577b785f
SR
6209 }
6210
6211 *ppos += cnt;
6212
6213 return cnt;
6214}
6215
6216
6217static const struct file_operations trace_options_fops = {
6218 .open = tracing_open_generic,
6219 .read = trace_options_read,
6220 .write = trace_options_write,
b444786f 6221 .llseek = generic_file_llseek,
577b785f
SR
6222};
6223
6224/*
6225 * In order to pass in both the trace_array descriptor as well as the index
6226 * to the flag that the trace option file represents, the trace_array
6227 * has a character array of trace_flags_index[], which holds the index
6228 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6229 * The address of this character array is passed to the flag option file
6230 * read/write callbacks.
6231 *
6232 * In order to extract both the index and the trace_array descriptor,
6233 * get_tr_index() uses the following algorithm.
6234 *
6235 * idx = *ptr;
6236 *
6237 * As the pointer itself contains the address of the index (remember
6238 * index[1] == 1).
6239 *
6240 * Then to get the trace_array descriptor, by subtracting that index
6241 * from the ptr, we get to the start of the index itself.
6242 *
6243 * ptr - idx == &index[0]
6244 *
6245 * Then a simple container_of() from that pointer gets us to the
6246 * trace_array descriptor.
6247 */
6248static void get_tr_index(void *data, struct trace_array **ptr,
6249 unsigned int *pindex)
6250{
6251 *pindex = *(unsigned char *)data;
6252
6253 *ptr = container_of(data - *pindex, struct trace_array,
6254 trace_flags_index);
6255}
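/*
 * Illustrative sketch, not part of trace.c: the container_of() recovery in
 * get_tr_index() above can be hard to see at first glance. The toy structure
 * below (a hypothetical example) repeats the trick: each element of idx[]
 * stores its own position, so a pointer to idx[n] alone is enough to recover
 * both the flag number and the enclosing structure.
 */
#if 0	/* stand-alone example, never compiled as part of the kernel */
struct toy {
	void		*something_else;
	unsigned char	idx[8];		/* idx[0] == 0, idx[1] == 1, ... */
};

static void toy_decode(void *data, struct toy **ptr, unsigned int *pindex)
{
	/* The byte stored at 'data' is the index itself ... */
	*pindex = *(unsigned char *)data;

	/*
	 * ... so 'data - *pindex' points at idx[0], and container_of()
	 * walks back from that member to the enclosing struct toy.
	 */
	*ptr = container_of(data - *pindex, struct toy, idx);
}
#endif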
6256
a8259075
SR
6257static ssize_t
6258trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6259 loff_t *ppos)
6260{
9a38a885
SRRH
6261 void *tr_index = filp->private_data;
6262 struct trace_array *tr;
6263 unsigned int index;
a8259075
SR
6264 char *buf;
6265
9a38a885
SRRH
6266 get_tr_index(tr_index, &tr, &index);
6267
6268 if (tr->trace_flags & (1 << index))
a8259075
SR
6269 buf = "1\n";
6270 else
6271 buf = "0\n";
6272
6273 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6274}
6275
6276static ssize_t
6277trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6278 loff_t *ppos)
6279{
9a38a885
SRRH
6280 void *tr_index = filp->private_data;
6281 struct trace_array *tr;
6282 unsigned int index;
a8259075
SR
6283 unsigned long val;
6284 int ret;
6285
9a38a885
SRRH
6286 get_tr_index(tr_index, &tr, &index);
6287
22fe9b54
PH
6288 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6289 if (ret)
a8259075
SR
6290 return ret;
6291
f2d84b65 6292 if (val != 0 && val != 1)
a8259075 6293 return -EINVAL;
69d34da2
SRRH
6294
6295 mutex_lock(&trace_types_lock);
2b6080f2 6296 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6297 mutex_unlock(&trace_types_lock);
a8259075 6298
613f04a0
SRRH
6299 if (ret < 0)
6300 return ret;
6301
a8259075
SR
6302 *ppos += cnt;
6303
6304 return cnt;
6305}
6306
a8259075
SR
6307static const struct file_operations trace_options_core_fops = {
6308 .open = tracing_open_generic,
6309 .read = trace_options_core_read,
6310 .write = trace_options_core_write,
b444786f 6311 .llseek = generic_file_llseek,
a8259075
SR
6312};
6313
5452af66 6314struct dentry *trace_create_file(const char *name,
f4ae40a6 6315 umode_t mode,
5452af66
FW
6316 struct dentry *parent,
6317 void *data,
6318 const struct file_operations *fops)
6319{
6320 struct dentry *ret;
6321
8434dc93 6322 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6323 if (!ret)
8434dc93 6324 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6325
6326 return ret;
6327}
6328
6329
2b6080f2 6330static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6331{
6332 struct dentry *d_tracer;
a8259075 6333
2b6080f2
SR
6334 if (tr->options)
6335 return tr->options;
a8259075 6336
7eeafbca 6337 d_tracer = tracing_get_dentry(tr);
14a5ae40 6338 if (IS_ERR(d_tracer))
a8259075
SR
6339 return NULL;
6340
8434dc93 6341 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6342 if (!tr->options) {
8434dc93 6343 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6344 return NULL;
6345 }
6346
2b6080f2 6347 return tr->options;
a8259075
SR
6348}
6349
577b785f 6350static void
2b6080f2
SR
6351create_trace_option_file(struct trace_array *tr,
6352 struct trace_option_dentry *topt,
577b785f
SR
6353 struct tracer_flags *flags,
6354 struct tracer_opt *opt)
6355{
6356 struct dentry *t_options;
577b785f 6357
2b6080f2 6358 t_options = trace_options_init_dentry(tr);
577b785f
SR
6359 if (!t_options)
6360 return;
6361
6362 topt->flags = flags;
6363 topt->opt = opt;
2b6080f2 6364 topt->tr = tr;
577b785f 6365
5452af66 6366 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6367 &trace_options_fops);
6368
577b785f
SR
6369}
6370
37aea98b 6371static void
2b6080f2 6372create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6373{
6374 struct trace_option_dentry *topts;
37aea98b 6375 struct trace_options *tr_topts;
577b785f
SR
6376 struct tracer_flags *flags;
6377 struct tracer_opt *opts;
6378 int cnt;
37aea98b 6379 int i;
577b785f
SR
6380
6381 if (!tracer)
37aea98b 6382 return;
577b785f
SR
6383
6384 flags = tracer->flags;
6385
6386 if (!flags || !flags->opts)
37aea98b
SRRH
6387 return;
6388
6389 /*
6390 * If this is an instance, only create flags for tracers
6391 * the instance may have.
6392 */
6393 if (!trace_ok_for_array(tracer, tr))
6394 return;
6395
6396 for (i = 0; i < tr->nr_topts; i++) {
6397 /* Make sure there's no duplicate flags. */
6398 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
6399 return;
6400 }
577b785f
SR
6401
6402 opts = flags->opts;
6403
6404 for (cnt = 0; opts[cnt].name; cnt++)
6405 ;
6406
0cfe8245 6407 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 6408 if (!topts)
37aea98b
SRRH
6409 return;
6410
6411 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6412 GFP_KERNEL);
6413 if (!tr_topts) {
6414 kfree(topts);
6415 return;
6416 }
6417
6418 tr->topts = tr_topts;
6419 tr->topts[tr->nr_topts].tracer = tracer;
6420 tr->topts[tr->nr_topts].topts = topts;
6421 tr->nr_topts++;
577b785f 6422
41d9c0be 6423 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 6424 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 6425 &opts[cnt]);
41d9c0be
SRRH
6426 WARN_ONCE(topts[cnt].entry == NULL,
6427 "Failed to create trace option: %s",
6428 opts[cnt].name);
6429 }
577b785f
SR
6430}
6431
a8259075 6432static struct dentry *
2b6080f2
SR
6433create_trace_option_core_file(struct trace_array *tr,
6434 const char *option, long index)
a8259075
SR
6435{
6436 struct dentry *t_options;
a8259075 6437
2b6080f2 6438 t_options = trace_options_init_dentry(tr);
a8259075
SR
6439 if (!t_options)
6440 return NULL;
6441
9a38a885
SRRH
6442 return trace_create_file(option, 0644, t_options,
6443 (void *)&tr->trace_flags_index[index],
6444 &trace_options_core_fops);
a8259075
SR
6445}
6446
16270145 6447static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6448{
6449 struct dentry *t_options;
16270145 6450 bool top_level = tr == &global_trace;
a8259075
SR
6451 int i;
6452
2b6080f2 6453 t_options = trace_options_init_dentry(tr);
a8259075
SR
6454 if (!t_options)
6455 return;
6456
16270145
SRRH
6457 for (i = 0; trace_options[i]; i++) {
6458 if (top_level ||
6459 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6460 create_trace_option_core_file(tr, trace_options[i], i);
6461 }
a8259075
SR
6462}
6463
499e5470
SR
6464static ssize_t
6465rb_simple_read(struct file *filp, char __user *ubuf,
6466 size_t cnt, loff_t *ppos)
6467{
348f0fc2 6468 struct trace_array *tr = filp->private_data;
499e5470
SR
6469 char buf[64];
6470 int r;
6471
10246fa3 6472 r = tracer_tracing_is_on(tr);
499e5470
SR
6473 r = sprintf(buf, "%d\n", r);
6474
6475 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6476}
6477
6478static ssize_t
6479rb_simple_write(struct file *filp, const char __user *ubuf,
6480 size_t cnt, loff_t *ppos)
6481{
348f0fc2 6482 struct trace_array *tr = filp->private_data;
12883efb 6483 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6484 unsigned long val;
6485 int ret;
6486
6487 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6488 if (ret)
6489 return ret;
6490
6491 if (buffer) {
2df8f8a6
SR
6492 mutex_lock(&trace_types_lock);
6493 if (val) {
10246fa3 6494 tracer_tracing_on(tr);
2b6080f2
SR
6495 if (tr->current_trace->start)
6496 tr->current_trace->start(tr);
2df8f8a6 6497 } else {
10246fa3 6498 tracer_tracing_off(tr);
2b6080f2
SR
6499 if (tr->current_trace->stop)
6500 tr->current_trace->stop(tr);
2df8f8a6
SR
6501 }
6502 mutex_unlock(&trace_types_lock);
499e5470
SR
6503 }
6504
6505 (*ppos)++;
6506
6507 return cnt;
6508}
6509
6510static const struct file_operations rb_simple_fops = {
7b85af63 6511 .open = tracing_open_generic_tr,
499e5470
SR
6512 .read = rb_simple_read,
6513 .write = rb_simple_write,
7b85af63 6514 .release = tracing_release_generic_tr,
499e5470
SR
6515 .llseek = default_llseek,
6516};
6517
277ba044
SR
6518struct dentry *trace_instance_dir;
6519
6520static void
8434dc93 6521init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6522
55034cd6
SRRH
6523static int
6524allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6525{
6526 enum ring_buffer_flags rb_flags;
737223fb 6527
983f938a 6528 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 6529
dced341b
SRRH
6530 buf->tr = tr;
6531
55034cd6
SRRH
6532 buf->buffer = ring_buffer_alloc(size, rb_flags);
6533 if (!buf->buffer)
6534 return -ENOMEM;
737223fb 6535
55034cd6
SRRH
6536 buf->data = alloc_percpu(struct trace_array_cpu);
6537 if (!buf->data) {
6538 ring_buffer_free(buf->buffer);
6539 return -ENOMEM;
6540 }
737223fb 6541
737223fb
SRRH
6542 /* Allocate the first page for all buffers */
6543 set_buffer_entries(&tr->trace_buffer,
6544 ring_buffer_size(tr->trace_buffer.buffer, 0));
6545
55034cd6
SRRH
6546 return 0;
6547}
737223fb 6548
55034cd6
SRRH
6549static int allocate_trace_buffers(struct trace_array *tr, int size)
6550{
6551 int ret;
737223fb 6552
55034cd6
SRRH
6553 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6554 if (ret)
6555 return ret;
737223fb 6556
55034cd6
SRRH
6557#ifdef CONFIG_TRACER_MAX_TRACE
6558 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6559 allocate_snapshot ? size : 1);
6560 if (WARN_ON(ret)) {
737223fb 6561 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6562 free_percpu(tr->trace_buffer.data);
6563 return -ENOMEM;
6564 }
6565 tr->allocated_snapshot = allocate_snapshot;
737223fb 6566
55034cd6
SRRH
6567 /*
6568 * Only the top level trace array gets its snapshot allocated
6569 * from the kernel command line.
6570 */
6571 allocate_snapshot = false;
737223fb 6572#endif
55034cd6 6573 return 0;
737223fb
SRRH
6574}
6575
f0b70cc4
SRRH
6576static void free_trace_buffer(struct trace_buffer *buf)
6577{
6578 if (buf->buffer) {
6579 ring_buffer_free(buf->buffer);
6580 buf->buffer = NULL;
6581 free_percpu(buf->data);
6582 buf->data = NULL;
6583 }
6584}
6585
23aaa3c1
SRRH
6586static void free_trace_buffers(struct trace_array *tr)
6587{
6588 if (!tr)
6589 return;
6590
f0b70cc4 6591 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6592
6593#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6594 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6595#endif
6596}
6597
9a38a885
SRRH
6598static void init_trace_flags_index(struct trace_array *tr)
6599{
6600 int i;
6601
6602 /* Used by the trace options files */
6603 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6604 tr->trace_flags_index[i] = i;
6605}
6606
37aea98b
SRRH
6607static void __update_tracer_options(struct trace_array *tr)
6608{
6609 struct tracer *t;
6610
6611 for (t = trace_types; t; t = t->next)
6612 add_tracer_options(tr, t);
6613}
6614
6615static void update_tracer_options(struct trace_array *tr)
6616{
6617 mutex_lock(&trace_types_lock);
6618 __update_tracer_options(tr);
6619 mutex_unlock(&trace_types_lock);
6620}
6621
eae47358 6622static int instance_mkdir(const char *name)
737223fb 6623{
6624 struct trace_array *tr;
6625 int ret;
277ba044
SR
6626
6627 mutex_lock(&trace_types_lock);
6628
6629 ret = -EEXIST;
6630 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6631 if (tr->name && strcmp(tr->name, name) == 0)
6632 goto out_unlock;
6633 }
6634
6635 ret = -ENOMEM;
6636 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6637 if (!tr)
6638 goto out_unlock;
6639
6640 tr->name = kstrdup(name, GFP_KERNEL);
6641 if (!tr->name)
6642 goto out_free_tr;
6643
ccfe9e42
AL
6644 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6645 goto out_free_tr;
6646
983f938a
SRRH
6647 tr->trace_flags = global_trace.trace_flags;
6648
ccfe9e42
AL
6649 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6650
277ba044
SR
6651 raw_spin_lock_init(&tr->start_lock);
6652
0b9b12c1
SRRH
6653 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6654
277ba044
SR
6655 tr->current_trace = &nop_trace;
6656
6657 INIT_LIST_HEAD(&tr->systems);
6658 INIT_LIST_HEAD(&tr->events);
6659
737223fb 6660 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6661 goto out_free_tr;
6662
8434dc93 6663 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6664 if (!tr->dir)
6665 goto out_free_tr;
6666
6667 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6668 if (ret) {
8434dc93 6669 tracefs_remove_recursive(tr->dir);
277ba044 6670 goto out_free_tr;
609e85a7 6671 }
277ba044 6672
8434dc93 6673 init_tracer_tracefs(tr, tr->dir);
9a38a885 6674 init_trace_flags_index(tr);
37aea98b 6675 __update_tracer_options(tr);
277ba044
SR
6676
6677 list_add(&tr->list, &ftrace_trace_arrays);
6678
6679 mutex_unlock(&trace_types_lock);
6680
6681 return 0;
6682
6683 out_free_tr:
23aaa3c1 6684 free_trace_buffers(tr);
ccfe9e42 6685 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6686 kfree(tr->name);
6687 kfree(tr);
6688
6689 out_unlock:
6690 mutex_unlock(&trace_types_lock);
6691
6692 return ret;
6693
6694}
6695
eae47358 6696static int instance_rmdir(const char *name)
0c8916c3
SR
6697{
6698 struct trace_array *tr;
6699 int found = 0;
6700 int ret;
37aea98b 6701 int i;
0c8916c3
SR
6702
6703 mutex_lock(&trace_types_lock);
6704
6705 ret = -ENODEV;
6706 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6707 if (tr->name && strcmp(tr->name, name) == 0) {
6708 found = 1;
6709 break;
6710 }
6711 }
6712 if (!found)
6713 goto out_unlock;
6714
a695cb58 6715 ret = -EBUSY;
cf6ab6d9 6716 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6717 goto out_unlock;
6718
0c8916c3
SR
6719 list_del(&tr->list);
6720
6b450d25 6721 tracing_set_nop(tr);
0c8916c3 6722 event_trace_del_tracer(tr);
591dffda 6723 ftrace_destroy_function_files(tr);
681a4a2f 6724 tracefs_remove_recursive(tr->dir);
a9fcaaac 6725 free_trace_buffers(tr);
0c8916c3 6726
37aea98b
SRRH
6727 for (i = 0; i < tr->nr_topts; i++) {
6728 kfree(tr->topts[i].topts);
6729 }
6730 kfree(tr->topts);
6731
0c8916c3
SR
6732 kfree(tr->name);
6733 kfree(tr);
6734
6735 ret = 0;
6736
6737 out_unlock:
6738 mutex_unlock(&trace_types_lock);
6739
6740 return ret;
6741}
6742
277ba044
SR
6743static __init void create_trace_instances(struct dentry *d_tracer)
6744{
eae47358
SRRH
6745 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6746 instance_mkdir,
6747 instance_rmdir);
6748 if (WARN_ON(!trace_instance_dir))
6749 return;
277ba044
SR
6750}
6751
2b6080f2 6752static void
8434dc93 6753init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6754{
121aaee7 6755 int cpu;
2b6080f2 6756
607e2ea1
SRRH
6757 trace_create_file("available_tracers", 0444, d_tracer,
6758 tr, &show_traces_fops);
6759
6760 trace_create_file("current_tracer", 0644, d_tracer,
6761 tr, &set_tracer_fops);
6762
ccfe9e42
AL
6763 trace_create_file("tracing_cpumask", 0644, d_tracer,
6764 tr, &tracing_cpumask_fops);
6765
2b6080f2
SR
6766 trace_create_file("trace_options", 0644, d_tracer,
6767 tr, &tracing_iter_fops);
6768
6769 trace_create_file("trace", 0644, d_tracer,
6484c71c 6770 tr, &tracing_fops);
2b6080f2
SR
6771
6772 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6773 tr, &tracing_pipe_fops);
2b6080f2
SR
6774
6775 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6776 tr, &tracing_entries_fops);
2b6080f2
SR
6777
6778 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6779 tr, &tracing_total_entries_fops);
6780
238ae93d 6781 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6782 tr, &tracing_free_buffer_fops);
6783
6784 trace_create_file("trace_marker", 0220, d_tracer,
6785 tr, &tracing_mark_fops);
6786
6787 trace_create_file("trace_clock", 0644, d_tracer, tr,
6788 &trace_clock_fops);
6789
6790 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6791 tr, &rb_simple_fops);
ce9bae55 6792
16270145
SRRH
6793 create_trace_options_dir(tr);
6794
6d9b3fa5
SRRH
6795#ifdef CONFIG_TRACER_MAX_TRACE
6796 trace_create_file("tracing_max_latency", 0644, d_tracer,
6797 &tr->max_latency, &tracing_max_lat_fops);
6798#endif
6799
591dffda
SRRH
6800 if (ftrace_create_function_files(tr, d_tracer))
6801 WARN(1, "Could not allocate function filter files");
6802
ce9bae55
SRRH
6803#ifdef CONFIG_TRACER_SNAPSHOT
6804 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6805 tr, &snapshot_fops);
ce9bae55 6806#endif
121aaee7
SRRH
6807
6808 for_each_tracing_cpu(cpu)
8434dc93 6809 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6810
2b6080f2
SR
6811}
6812
f76180bc
SRRH
6813static struct vfsmount *trace_automount(void *ignore)
6814{
6815 struct vfsmount *mnt;
6816 struct file_system_type *type;
6817
6818 /*
6819 * To maintain backward compatibility for tools that mount
6820 * debugfs to get to the tracing facility, tracefs is automatically
6821 * mounted to the debugfs/tracing directory.
6822 */
6823 type = get_fs_type("tracefs");
6824 if (!type)
6825 return NULL;
6826 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6827 put_filesystem(type);
6828 if (IS_ERR(mnt))
6829 return NULL;
6830 mntget(mnt);
6831
6832 return mnt;
6833}
6834
6835/**
6836 * tracing_init_dentry - initialize top level trace array
6837 *
6838 * This is called when creating files or directories in the tracing
6839 * directory. It is called via fs_initcall() by any of the boot up code
6840 * and expects to return the dentry of the top level tracing directory.
6841 */
6842struct dentry *tracing_init_dentry(void)
6843{
6844 struct trace_array *tr = &global_trace;
6845
f76180bc 6846 /* The top level trace array uses NULL as parent */
7eeafbca 6847 if (tr->dir)
f76180bc 6848 return NULL;
7eeafbca 6849
8b129199
JW
6850 if (WARN_ON(!tracefs_initialized()) ||
6851 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6852 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
6853 return ERR_PTR(-ENODEV);
6854
f76180bc
SRRH
6855 /*
6856 * As there may still be users that expect the tracing
6857 * files to exist in debugfs/tracing, we must automount
6858 * the tracefs file system there, so older tools still
6859 * work with the newer kernel.
6860 */
6861 tr->dir = debugfs_create_automount("tracing", NULL,
6862 trace_automount, NULL);
7eeafbca
SRRH
6863 if (!tr->dir) {
6864 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6865 return ERR_PTR(-ENOMEM);
6866 }
6867
8434dc93 6868 return NULL;
7eeafbca
SRRH
6869}
6870
0c564a53
SRRH
6871extern struct trace_enum_map *__start_ftrace_enum_maps[];
6872extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6873
6874static void __init trace_enum_init(void)
6875{
3673b8e4
SRRH
6876 int len;
6877
6878 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6879 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6880}
6881
6882#ifdef CONFIG_MODULES
6883static void trace_module_add_enums(struct module *mod)
6884{
6885 if (!mod->num_trace_enums)
6886 return;
6887
6888 /*
6889 * Modules with bad taint do not have events created, do
6890 * not bother with enums either.
6891 */
6892 if (trace_module_has_bad_taint(mod))
6893 return;
6894
9828413d 6895 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6896}
6897
9828413d
SRRH
6898#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6899static void trace_module_remove_enums(struct module *mod)
6900{
6901 union trace_enum_map_item *map;
6902 union trace_enum_map_item **last = &trace_enum_maps;
6903
6904 if (!mod->num_trace_enums)
6905 return;
6906
6907 mutex_lock(&trace_enum_mutex);
6908
6909 map = trace_enum_maps;
6910
6911 while (map) {
6912 if (map->head.mod == mod)
6913 break;
6914 map = trace_enum_jmp_to_tail(map);
6915 last = &map->tail.next;
6916 map = map->tail.next;
6917 }
6918 if (!map)
6919 goto out;
6920
6921 *last = trace_enum_jmp_to_tail(map)->tail.next;
6922 kfree(map);
6923 out:
6924 mutex_unlock(&trace_enum_mutex);
6925}
6926#else
6927static inline void trace_module_remove_enums(struct module *mod) { }
6928#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6929
3673b8e4
SRRH
6930static int trace_module_notify(struct notifier_block *self,
6931 unsigned long val, void *data)
6932{
6933 struct module *mod = data;
6934
6935 switch (val) {
6936 case MODULE_STATE_COMING:
6937 trace_module_add_enums(mod);
6938 break;
9828413d
SRRH
6939 case MODULE_STATE_GOING:
6940 trace_module_remove_enums(mod);
6941 break;
3673b8e4
SRRH
6942 }
6943
6944 return 0;
0c564a53
SRRH
6945}
6946
3673b8e4
SRRH
6947static struct notifier_block trace_module_nb = {
6948 .notifier_call = trace_module_notify,
6949 .priority = 0,
6950};
9828413d 6951#endif /* CONFIG_MODULES */
3673b8e4 6952
8434dc93 6953static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6954{
6955 struct dentry *d_tracer;
bc0c38d1 6956
7e53bd42
LJ
6957 trace_access_lock_init();
6958
bc0c38d1 6959 d_tracer = tracing_init_dentry();
14a5ae40 6960 if (IS_ERR(d_tracer))
ed6f1c99 6961 return 0;
bc0c38d1 6962
8434dc93 6963 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6964
5452af66 6965 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6966 &global_trace, &tracing_thresh_fops);
a8259075 6967
339ae5d3 6968 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6969 NULL, &tracing_readme_fops);
6970
69abe6a5
AP
6971 trace_create_file("saved_cmdlines", 0444, d_tracer,
6972 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6973
939c7a4f
YY
6974 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6975 NULL, &tracing_saved_cmdlines_size_fops);
6976
0c564a53
SRRH
6977 trace_enum_init();
6978
9828413d
SRRH
6979 trace_create_enum_file(d_tracer);
6980
3673b8e4
SRRH
6981#ifdef CONFIG_MODULES
6982 register_module_notifier(&trace_module_nb);
6983#endif
6984
bc0c38d1 6985#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6986 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6987 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6988#endif
b04cc6b1 6989
277ba044 6990 create_trace_instances(d_tracer);
5452af66 6991
37aea98b 6992 update_tracer_options(&global_trace);
09d23a1d 6993
b5ad384e 6994 return 0;
bc0c38d1
SR
6995}
6996
3f5a54e3
SR
6997static int trace_panic_handler(struct notifier_block *this,
6998 unsigned long event, void *unused)
6999{
944ac425 7000 if (ftrace_dump_on_oops)
cecbca96 7001 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7002 return NOTIFY_OK;
7003}
7004
7005static struct notifier_block trace_panic_notifier = {
7006 .notifier_call = trace_panic_handler,
7007 .next = NULL,
7008 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7009};
7010
7011static int trace_die_handler(struct notifier_block *self,
7012 unsigned long val,
7013 void *data)
7014{
7015 switch (val) {
7016 case DIE_OOPS:
944ac425 7017 if (ftrace_dump_on_oops)
cecbca96 7018 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7019 break;
7020 default:
7021 break;
7022 }
7023 return NOTIFY_OK;
7024}
7025
7026static struct notifier_block trace_die_notifier = {
7027 .notifier_call = trace_die_handler,
7028 .priority = 200
7029};
7030
7031/*
7032 * printk is set to max of 1024, we really don't need it that big.
7033 * Nothing should be printing 1000 characters anyway.
7034 */
7035#define TRACE_MAX_PRINT 1000
7036
7037/*
7038 * Define here KERN_TRACE so that we have one place to modify
7039 * it if we decide to change what log level the ftrace dump
7040 * should be at.
7041 */
428aee14 7042#define KERN_TRACE KERN_EMERG
3f5a54e3 7043
955b61e5 7044void
3f5a54e3
SR
7045trace_printk_seq(struct trace_seq *s)
7046{
7047 /* Probably should print a warning here. */
3a161d99
SRRH
7048 if (s->seq.len >= TRACE_MAX_PRINT)
7049 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 7050
820b75f6
SRRH
7051 /*
7052 * More paranoid code. Although the buffer size is set to
7053 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7054 * an extra layer of protection.
7055 */
7056 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7057 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
7058
7059 /* should be zero ended, but we are paranoid. */
3a161d99 7060 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
7061
7062 printk(KERN_TRACE "%s", s->buffer);
7063
f9520750 7064 trace_seq_init(s);
3f5a54e3
SR
7065}
7066
955b61e5
JW
7067void trace_init_global_iter(struct trace_iterator *iter)
7068{
7069 iter->tr = &global_trace;
2b6080f2 7070 iter->trace = iter->tr->current_trace;
ae3b5093 7071 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 7072 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
7073
7074 if (iter->trace && iter->trace->open)
7075 iter->trace->open(iter);
7076
7077 /* Annotate start of buffers if we had overruns */
7078 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7079 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7080
7081 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7082 if (trace_clocks[iter->tr->clock_id].in_ns)
7083 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
7084}
7085
7fe70b57 7086void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 7087{
3f5a54e3
SR
7088 /* use static because iter can be a bit big for the stack */
7089 static struct trace_iterator iter;
7fe70b57 7090 static atomic_t dump_running;
983f938a 7091 struct trace_array *tr = &global_trace;
cf586b61 7092 unsigned int old_userobj;
7093 unsigned long flags;
7094 int cnt = 0, cpu;
3f5a54e3 7095
7fe70b57
SRRH
7096 /* Only allow one dump user at a time. */
7097 if (atomic_inc_return(&dump_running) != 1) {
7098 atomic_dec(&dump_running);
7099 return;
7100 }
3f5a54e3 7101
7fe70b57
SRRH
7102 /*
7103 * Always turn off tracing when we dump.
7104 * We don't need to show trace output of what happens
7105 * between multiple crashes.
7106 *
7107 * If the user does a sysrq-z, then they can re-enable
7108 * tracing with echo 1 > tracing_on.
7109 */
0ee6b6cf 7110 tracing_off();
cf586b61 7111
7fe70b57 7112 local_irq_save(flags);
3f5a54e3 7113
38dbe0b1 7114 /* Simulate the iterator */
955b61e5
JW
7115 trace_init_global_iter(&iter);
7116
d769041f 7117 for_each_tracing_cpu(cpu) {
5e2d5ef8 7118 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
7119 }
7120
983f938a 7121 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 7122
b54d3de9 7123 /* don't look at user memory in panic mode */
983f938a 7124 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 7125
cecbca96
FW
7126 switch (oops_dump_mode) {
7127 case DUMP_ALL:
ae3b5093 7128 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7129 break;
7130 case DUMP_ORIG:
7131 iter.cpu_file = raw_smp_processor_id();
7132 break;
7133 case DUMP_NONE:
7134 goto out_enable;
7135 default:
7136 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7137 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7138 }
7139
7140 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7141
7fe70b57
SRRH
7142 /* Did function tracer already get disabled? */
7143 if (ftrace_is_dead()) {
7144 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7145 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7146 }
7147
3f5a54e3
SR
7148 /*
7149 * We need to stop all tracing on all CPUs to read
7150 * the next buffer. This is a bit expensive, but is
7151 * not done often. We fill all what we can read,
7152 * and then release the locks again.
7153 */
7154
3f5a54e3
SR
7155 while (!trace_empty(&iter)) {
7156
7157 if (!cnt)
7158 printk(KERN_TRACE "---------------------------------\n");
7159
7160 cnt++;
7161
7162 /* reset all but tr, trace, and overruns */
7163 memset(&iter.seq, 0,
7164 sizeof(struct trace_iterator) -
7165 offsetof(struct trace_iterator, seq));
7166 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7167 iter.pos = -1;
7168
955b61e5 7169 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7170 int ret;
7171
7172 ret = print_trace_line(&iter);
7173 if (ret != TRACE_TYPE_NO_CONSUME)
7174 trace_consume(&iter);
3f5a54e3 7175 }
b892e5c8 7176 touch_nmi_watchdog();
3f5a54e3
SR
7177
7178 trace_printk_seq(&iter.seq);
7179 }
7180
7181 if (!cnt)
7182 printk(KERN_TRACE " (ftrace buffer empty)\n");
7183 else
7184 printk(KERN_TRACE "---------------------------------\n");
7185
cecbca96 7186 out_enable:
983f938a 7187 tr->trace_flags |= old_userobj;
cf586b61 7188
7fe70b57
SRRH
7189 for_each_tracing_cpu(cpu) {
7190 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7191 }
7fe70b57 7192 atomic_dec(&dump_running);
cd891ae0 7193 local_irq_restore(flags);
3f5a54e3 7194}
a8eecf22 7195EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7196
3928a8a2 7197__init static int tracer_alloc_buffers(void)
bc0c38d1 7198{
73c5162a 7199 int ring_buf_size;
9e01c1b7 7200 int ret = -ENOMEM;
4c11d7ae 7201
b5e87c05
SRRH
7202 /*
7203 	 * Make sure we don't accidentally add more trace options
7204 * than we have bits for.
7205 */
9a38a885 7206 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 7207
9e01c1b7
RR
7208 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7209 goto out;
7210
ccfe9e42 7211 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7212 goto out_free_buffer_mask;
4c11d7ae 7213
07d777fe
SR
7214 /* Only allocate trace_printk buffers if a trace_printk exists */
7215 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7216 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7217 trace_printk_init_buffers();
7218
73c5162a
SR
7219 	/* To save memory, keep the ring buffer size at its minimum */
7220 if (ring_buffer_expanded)
7221 ring_buf_size = trace_buf_size;
7222 else
7223 ring_buf_size = 1;
7224
9e01c1b7 7225 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7226 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7227
2b6080f2
SR
7228 raw_spin_lock_init(&global_trace.start_lock);
7229
2c4a33ab
SRRH
7230 /* Used for event triggers */
7231 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7232 if (!temp_buffer)
7233 goto out_free_cpumask;
7234
939c7a4f
YY
7235 if (trace_create_savedcmd() < 0)
7236 goto out_free_temp_buffer;
7237
9e01c1b7 7238 	/* TODO: make the number of buffers hot-pluggable with CPUs */
737223fb 7239 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7240 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7241 WARN_ON(1);
939c7a4f 7242 goto out_free_savedcmd;
4c11d7ae 7243 }
a7603ff4 7244
499e5470
SR
7245 if (global_trace.buffer_disabled)
7246 tracing_off();
4c11d7ae 7247
e1e232ca
SR
7248 if (trace_boot_clock) {
7249 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7250 if (ret < 0)
7251 pr_warning("Trace clock %s not defined, going back to default\n",
7252 trace_boot_clock);
7253 }
7254
ca164318
SRRH
7255 /*
7256 * register_tracer() might reference current_trace, so it
7257 * needs to be set before we register anything. This is
7258 * just a bootstrap of current_trace anyway.
7259 */
2b6080f2
SR
7260 global_trace.current_trace = &nop_trace;
7261
0b9b12c1
SRRH
7262 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7263
4104d326
SRRH
7264 ftrace_init_global_array_ops(&global_trace);
7265
9a38a885
SRRH
7266 init_trace_flags_index(&global_trace);
7267
ca164318
SRRH
7268 register_tracer(&nop_trace);
7269
60a11774
SR
7270 /* All seems OK, enable tracing */
7271 tracing_disabled = 0;
3928a8a2 7272
3f5a54e3
SR
7273 atomic_notifier_chain_register(&panic_notifier_list,
7274 &trace_panic_notifier);
7275
7276 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7277
ae63b31e
SR
7278 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7279
7280 INIT_LIST_HEAD(&global_trace.systems);
7281 INIT_LIST_HEAD(&global_trace.events);
7282 list_add(&global_trace.list, &ftrace_trace_arrays);
7283
a4d1e688 7284 apply_trace_boot_options();
7bcfaf54 7285
77fd5c15
SRRH
7286 register_snapshot_cmd();
7287
2fc1dfbe 7288 return 0;
3f5a54e3 7289
939c7a4f
YY
7290out_free_savedcmd:
7291 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7292out_free_temp_buffer:
7293 ring_buffer_free(temp_buffer);
9e01c1b7 7294out_free_cpumask:
ccfe9e42 7295 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7296out_free_buffer_mask:
7297 free_cpumask_var(tracing_buffer_mask);
7298out:
7299 return ret;
bc0c38d1 7300}
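/*
 * The error handling in tracer_alloc_buffers() above uses the common kernel
 * "goto unwind" pattern: each allocation failure jumps to a label that frees
 * only what was already set up, in reverse order, so there is a single exit
 * path.  A minimal, hypothetical sketch of the same structure (all helpers
 * below are invented for the example):
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_setup(void)
{
	void *a, *b;
	int ret = -ENOMEM;

	a = example_alloc_a();
	if (!a)
		goto out;

	b = example_alloc_b();
	if (!b)
		goto out_free_a;

	example_use(a, b);
	return 0;

out_free_a:
	example_free_a(a);
out:
	return ret;
}
#endif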
b2821ae6 7301
5f893b26
SRRH
7302void __init trace_init(void)
7303{
0daa2302
SRRH
7304 if (tracepoint_printk) {
7305 tracepoint_print_iter =
7306 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7307 if (WARN_ON(!tracepoint_print_iter))
7308 tracepoint_printk = 0;
7309 }
5f893b26 7310 tracer_alloc_buffers();
0c564a53 7311 trace_event_init();
5f893b26
SRRH
7312}
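/*
 * trace_init() above degrades gracefully: if allocating the iterator for
 * tracepoint_printk fails, it warns once and clears tracepoint_printk, so
 * that feature is simply disabled instead of failing boot.  A minimal,
 * hypothetical sketch of the same pattern (names invented for the example):
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/slab.h>

static int example_feature_enabled = 1;
static void *example_feature_state;

static void example_feature_init(void)
{
	example_feature_state = kmalloc(64, GFP_KERNEL);
	if (WARN_ON(!example_feature_state))
		example_feature_enabled = 0;	/* disable the feature, keep booting */
}
#endif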
7313
b2821ae6
SR
7314__init static int clear_boot_tracer(void)
7315{
7316 /*
7317 	 * The buffer holding the default bootup tracer name lives in
7318 	 * an init section that will be freed after boot. This function
7319 	 * runs at late_initcall time; if the boot tracer was never
7320 	 * registered, clear the pointer so that a later registration
7321 	 * cannot access the buffer after it has been freed.
7322 */
7323 if (!default_bootup_tracer)
7324 return 0;
7325
7326 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7327 default_bootup_tracer);
7328 default_bootup_tracer = NULL;
7329
7330 return 0;
7331}
7332
8434dc93 7333fs_initcall(tracer_init_tracefs);
b2821ae6 7334late_initcall(clear_boot_tracer);
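/*
 * The two registrations above rely on initcall ordering: fs_initcall()
 * callbacks run before late_initcall() callbacks, so the tracefs files are
 * set up before the bootup-tracer cleanup gets a chance to run.  A minimal,
 * hypothetical sketch of hooking into the same levels (functions invented
 * for the example):
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/init.h>

static int __init example_fs_level_setup(void)
{
	return 0;	/* runs at fs_initcall time */
}
fs_initcall(example_fs_level_setup);

static int __init example_late_cleanup(void)
{
	return 0;	/* runs at late_initcall time, after all fs_initcalls */
}
late_initcall(example_late_cleanup);
#endif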