kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 *   Set to 1 if you want to dump the buffers of all CPUs
 *   Set to 2 if you want to dump only the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

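/*
 * Example (illustrative): enabling the dump-on-oops behaviour described
 * above, either at boot or at run time:
 *
 *      ftrace_dump_on_oops                     (command line, all CPUs)
 *      ftrace_dump_on_oops=orig_cpu            (only the CPU that oopsed)
 *      echo 1 > /proc/sys/kernel/ftrace_dump_on_oops   (at run time)
 */
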
static int __init stop_trace_on_warning(char *str)
{
        __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

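/*
 * Example (illustrative): default trace options can be given at boot as
 * a comma separated list, with names taken from the trace_options[]
 * table below, e.g.:
 *
 *      trace_options=stacktrace,sym-addr
 */
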
unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

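/*
 * Example (illustrative): the +500 rounds to the nearest microsecond,
 * so ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */
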
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
                                 struct ftrace_event_call *call, void *rec,
                                 struct ring_buffer_event *event)
{
        return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!global_trace.trace_buffer.buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
        ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

        return ts;
}

int tracing_is_enabled(void)
{
        return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it is much appreciated not to
 * have to wait for all that output. In any case this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize access to the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

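/*
 * Typical usage pattern of the primitives above (an illustrative
 * sketch; the real callers are the trace file read and splice paths
 * later in this file):
 *
 *      trace_access_lock(cpu);         (cpu or RING_BUFFER_ALL_CPUS)
 *      ... consume events from that cpu's ring buffer ...
 *      trace_access_unlock(cpu);
 */
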
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        if (global_trace.trace_buffer.buffer)
                ring_buffer_record_on(global_trace.trace_buffer.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
         * between setting this flag and actually turning
         * on the buffer.
         */
        global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip  = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

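/*
 * Note: the two helpers above back the trace_puts() macro (see
 * include/linux/kernel.h), which picks __trace_bputs() for string
 * literals (only the pointer is recorded) and __trace_puts() otherwise.
 * Example usage from kernel code (illustrative):
 *
 *      trace_puts("reached the slow path\n");
 */
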
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        if (WARN_ON(ret < 0))
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

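/*
 * Example (illustrative sketch): taking snapshots from kernel code,
 * assuming a point of interest inside a driver:
 *
 *      tracing_snapshot_alloc();       (once, from sleepable context)
 *      ...
 *      tracing_snapshot();             (at the event of interest)
 *
 * From user space the same buffer is reachable via
 * /sys/kernel/debug/tracing/snapshot (echo 1 to take a snapshot,
 * cat to read it).
 */
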
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        if (global_trace.trace_buffer.buffer)
                ring_buffer_record_off(global_trace.trace_buffer.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
         * between setting this flag and actually turning
         * on the buffer.
         */
        global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        if (global_trace.trace_buffer.buffer)
                return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
        return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

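/*
 * Example (illustrative): memparse() above accepts the usual K/M/G
 * suffixes, so a 10 MiB per-cpu buffer can be requested at boot with:
 *
 *      trace_buf_size=10M
 */
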
static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

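/*
 * Example (illustrative): the threshold is given in microseconds on the
 * command line and stored in nanoseconds (hence the "* 1000" above):
 *
 *      tracing_thresh=100      sets tracing_thresh to 100000 ns
 */
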
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        "record-cmd",
        "overwrite",
        "disable_on_free",
        "irq-info",
        "markers",
        "function-trace",
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
        { trace_clock_jiffies,  "uptime",       1 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
};

int trace_clock_id;

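/*
 * Example (illustrative): the names in trace_clocks[] are what user
 * space selects through the tracefs "trace_clock" file, e.g.:
 *
 *      echo global > /sys/kernel/debug/tracing/trace_clock
 */
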
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}

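/*
 * Example (illustrative sketch) of how a write handler uses the parser;
 * ftrace_filter_write() in kernel/trace/ftrace.c follows this shape:
 *
 *      struct trace_parser parser;
 *      ssize_t ret;
 *
 *      if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
 *              return -ENOMEM;
 *      ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *      if (ret >= 0 && trace_parser_loaded(&parser))
 *              ... act on parser.buffer ...
 *      trace_parser_put(&parser);
 */
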
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (!cnt)
                return 0;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret == cnt)
                return -EFAULT;

        cnt -= ret;

        s->readpos += cnt;
        return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tracing_max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

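/*
 * The two functions above are what the latency tracers (irqsoff,
 * wakeup, ...) call when they observe a new maximum latency; the
 * result is exposed through tracefs, e.g. (illustrative):
 *
 *      cat /sys/kernel/debug/tracing/tracing_max_latency
 */
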
static void default_wait_pipe(struct trace_iterator *iter)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return;

        ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
        if (!type->wait_pipe)
                type->wait_pipe = default_wait_pipe;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break them. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}

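/*
 * Example (illustrative sketch) of the registration API, modelled on
 * the built-in nop tracer; my_tracer_init/my_tracer_reset are
 * hypothetical callbacks:
 *
 *      static struct tracer my_tracer __read_mostly = {
 *              .name   = "mytracer",
 *              .init   = my_tracer_init,
 *              .reset  = my_tracer_reset,
 *      };
 *
 *      static __init int init_my_tracer(void)
 *      {
 *              return register_tracer(&my_tracer);
 *      }
 *      core_initcall(init_my_tracer);
 */
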
void tracing_reset(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
        struct ring_buffer *buffer = buf->buffer;
        int cpu;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        buf->time_start = ftrace_now(buf->cpu);

        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
        tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
        struct trace_array *tr;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
        mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
        return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
        tracing_disabled = 1;
        ftrace_stop();
        tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (--global_trace.stop_count) {
                if (global_trace.stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        global_trace.stop_count = 0;
                }
                goto out;
        }

        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
#endif

        arch_spin_unlock(&ftrace_max_lock);

        ftrace_start();
 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        /* If global, we need to also start the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_start();

        raw_spin_lock_irqsave(&tr->start_lock, flags);

        if (--tr->stop_count) {
                if (tr->stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        tr->stop_count = 0;
                }
                goto out;
        }

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        ftrace_stop();
        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;

        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
#endif

        arch_spin_unlock(&ftrace_max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        /* If global, we need to also stop the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_stop();

        raw_spin_lock_irqsave(&tr->start_lock, flags);
        if (tr->stop_count++)
                goto out;

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned pid, idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!arch_spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx == NO_CMDLINE_MAP) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                /*
                 * Check whether the cmdline buffer at idx has a pid
                 * mapped. We are going to overwrite that entry so we
                 * need to clear the map_pid_to_cmdline. Otherwise we
                 * would read the new comm for the old pid.
                 */
                pid = map_cmdline_to_pid[idx];
                if (pid != NO_CMDLINE_MAP)
                        map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

                map_cmdline_to_pid[idx] = tsk->pid;
                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
        unsigned map;

        if (!pid) {
                strcpy(comm, "<idle>");
                return;
        }

        if (WARN_ON_ONCE(pid < 0)) {
                strcpy(comm, "<XXX>");
                return;
        }

        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
        }

        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strcpy(comm, saved_cmdlines[map]);
        else
                strcpy(comm, "<...>");

        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
                return;

        if (!__this_cpu_read(trace_cmdline_save))
                return;

        __this_cpu_write(trace_cmdline_save, false);

        trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count = pc & 0xff;
        entry->pid = (tsk) ? tsk->pid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL) {
                struct trace_entry *ent = ring_buffer_event_data(event);

                tracing_generic_entry_update(ent, flags, pc);
                ent->type = type;
        }

        return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_cmdline_save, true);
        ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
                             unsigned long flags, int pc)
{
        __buffer_unlock_commit(buffer, event);

        ftrace_trace_stack(buffer, flags, 6, pc);
        ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                          struct ftrace_event_file *ftrace_file,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
{
        *current_rb = ftrace_file->tr->trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc)
{
        *current_rb = global_trace.trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs)
{
        __buffer_unlock_commit(buffer, event);

        ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event)
{
        ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
{
        struct ftrace_event_call *call = &event_function;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;

        /* If we are reading the ring buffer, don't trace */
        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
                                          flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        if (!filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
        unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs)
{
        struct ftrace_event_call *call = &event_kernel_stack;
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
        int use_stack;
        int size = FTRACE_STACK_ENTRIES;

        trace.nr_entries = 0;
        trace.skip = skip;

        /*
         * Since events can happen in NMIs there's no safe way to
         * use the per cpu ftrace_stacks. We reserve it and if an interrupt
         * or NMI comes in, it will just have to use the default
         * FTRACE_STACK_SIZE.
         */
        preempt_disable_notrace();

        use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
        /*
         * We don't need any atomic variables, just a barrier.
         * If an interrupt comes in, we don't care, because it would
         * have exited and put the counter back to what we want.
         * We just need a barrier to keep gcc from moving things
         * around.
         */
        barrier();
        if (use_stack == 1) {
                trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
                trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

                if (regs)
                        save_stack_trace_regs(regs, &trace);
                else
                        save_stack_trace(&trace);

                if (trace.nr_entries > size)
                        size = trace.nr_entries;
        } else
                /* From now on, use_stack is a boolean */
                use_stack = 0;

        size *= sizeof(unsigned long);

        event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                          sizeof(*entry) + size, flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);

        memset(&entry->caller, 0, size);

        if (use_stack)
                memcpy(&entry->caller, trace.entries,
                       trace.nr_entries * sizeof(unsigned long));
        else {
                trace.max_entries = FTRACE_STACK_ENTRIES;
                trace.entries = entry->caller;
                if (regs)
                        save_stack_trace_regs(regs, &trace);
                else
                        save_stack_trace(&trace);
        }

        entry->size = trace.nr_entries;

        if (!filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

 out:
        /* Again, don't let gcc optimize things here */
        barrier();
        __this_cpu_dec(ftrace_stack_reserve);
        preempt_enable_notrace();

}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
                             int skip, int pc, struct pt_regs *regs)
{
        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        __ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
                        int skip, int pc)
{
        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                   int pc)
{
        __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
        unsigned long flags;

        if (tracing_disabled || tracing_selftest_running)
                return;

        local_save_flags(flags);

        /*
         * Skip 3 more, seems to get us at the caller of
         * this function.
         */
        skip += 3;
        __ftrace_trace_stack(global_trace.trace_buffer.buffer,
                             flags, skip, preempt_count(), NULL);
}

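/*
 * Example (illustrative): any kernel code can record its current back
 * trace into the ring buffer with:
 *
 *      trace_dump_stack(0);
 */
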
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
        struct stack_trace trace;

        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;

        /*
         * NMIs can not handle page faults, even with fix ups.
         * Saving the user stack can (and often does) fault.
         */
        if (unlikely(in_nmi()))
                return;

        /*
         * prevent recursion, since the user stack tracing may
         * trigger other kernel events.
         */
        preempt_disable();
        if (__this_cpu_read(user_stack_count))
                goto out;

        __this_cpu_inc(user_stack_count);

        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
                goto out_drop_count;
        entry = ring_buffer_event_data(event);

        entry->tgid = current->tgid;
        memset(&entry->caller, 0, sizeof(entry->caller));

        trace.nr_entries = 0;
        trace.max_entries = FTRACE_STACK_ENTRIES;
        trace.skip = 0;
        trace.entries = entry->caller;

        save_stack_trace_user(&trace);
        if (!filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

 out_drop_count:
        __this_cpu_dec(user_stack_count);
 out:
        preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
        ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

07d777fe
SR
1774/* created for use with alloc_percpu */
1775struct trace_buffer_struct {
1776 char buffer[TRACE_BUF_SIZE];
1777};
1778
1779static struct trace_buffer_struct *trace_percpu_buffer;
1780static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1781static struct trace_buffer_struct *trace_percpu_irq_buffer;
1782static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1783
1784/*
1785 * The buffer used is dependent on the context. There is a per cpu
1786 * buffer for normal context, softirq contex, hard irq context and
1787 * for NMI context. Thise allows for lockless recording.
1788 *
1789 * Note, if the buffers failed to be allocated, then this returns NULL
1790 */
1791static char *get_trace_buf(void)
1792{
1793 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1794
1795 /*
1796 * If we have allocated per cpu buffers, then we do not
1797 * need to do any locking.
1798 */
1799 if (in_nmi())
1800 percpu_buffer = trace_percpu_nmi_buffer;
1801 else if (in_irq())
1802 percpu_buffer = trace_percpu_irq_buffer;
1803 else if (in_softirq())
1804 percpu_buffer = trace_percpu_sirq_buffer;
1805 else
1806 percpu_buffer = trace_percpu_buffer;
1807
1808 if (!percpu_buffer)
1809 return NULL;
1810
d8a0349c 1811 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
1812}
1813
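/*
 * Illustrative sketch (not in trace.c) of the context test order used
 * above: NMI must be checked before hard irq, and hard irq before
 * softirq, since the predicates nest (an NMI arriving in an interrupt
 * handler satisfies in_irq() too).
 */
#include <linux/hardirq.h>

static const char *current_trace_ctx(void)
{
	if (in_nmi())
		return "nmi";
	if (in_irq())
		return "hardirq";
	if (in_softirq())
		return "softirq";
	return "normal";
}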
1814static int alloc_percpu_trace_buffer(void)
1815{
1816 struct trace_buffer_struct *buffers;
1817 struct trace_buffer_struct *sirq_buffers;
1818 struct trace_buffer_struct *irq_buffers;
1819 struct trace_buffer_struct *nmi_buffers;
1820
1821 buffers = alloc_percpu(struct trace_buffer_struct);
1822 if (!buffers)
1823 goto err_warn;
1824
1825 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1826 if (!sirq_buffers)
1827 goto err_sirq;
1828
1829 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1830 if (!irq_buffers)
1831 goto err_irq;
1832
1833 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1834 if (!nmi_buffers)
1835 goto err_nmi;
1836
1837 trace_percpu_buffer = buffers;
1838 trace_percpu_sirq_buffer = sirq_buffers;
1839 trace_percpu_irq_buffer = irq_buffers;
1840 trace_percpu_nmi_buffer = nmi_buffers;
1841
1842 return 0;
1843
1844 err_nmi:
1845 free_percpu(irq_buffers);
1846 err_irq:
1847 free_percpu(sirq_buffers);
1848 err_sirq:
1849 free_percpu(buffers);
1850 err_warn:
1851 WARN(1, "Could not allocate percpu trace_printk buffer");
1852 return -ENOMEM;
1853}
1854
81698831
SR
1855static int buffers_allocated;
1856
07d777fe
SR
1857void trace_printk_init_buffers(void)
1858{
07d777fe
SR
1859 if (buffers_allocated)
1860 return;
1861
1862 if (alloc_percpu_trace_buffer())
1863 return;
1864
1865 pr_info("ftrace: Allocated trace_printk buffers\n");
1866
b382ede6
SR
1867 /* Expand the buffers to set size */
1868 tracing_update_buffers();
1869
07d777fe 1870 buffers_allocated = 1;
81698831
SR
1871
1872 /*
1873 * trace_printk_init_buffers() can be called by modules.
1874 * If that happens, then we need to start cmdline recording
1875 * directly here. If the global_trace.buffer is already
1876 * allocated here, then this was called by module code.
1877 */
12883efb 1878 if (global_trace.trace_buffer.buffer)
81698831
SR
1879 tracing_start_cmdline_record();
1880}
1881
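/*
 * Sketch (hypothetical module, not part of trace.c): a module that
 * uses trace_printk() ends up invoking trace_printk_init_buffers() at
 * load time, so loading it both allocates the percpu buffers above and
 * expands the ring buffer to its default size.
 */
#include <linux/kernel.h>
#include <linux/module.h>

static int __init tp_demo_init(void)
{
	trace_printk("percpu trace_printk buffers are allocated now\n");
	return 0;
}
module_init(tp_demo_init);
MODULE_LICENSE("GPL");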
1882void trace_printk_start_comm(void)
1883{
1884 /* Start tracing comms if trace printk is set */
1885 if (!buffers_allocated)
1886 return;
1887 tracing_start_cmdline_record();
1888}
1889
1890static void trace_printk_start_stop_comm(int enabled)
1891{
1892 if (!buffers_allocated)
1893 return;
1894
1895 if (enabled)
1896 tracing_start_cmdline_record();
1897 else
1898 tracing_stop_cmdline_record();
07d777fe
SR
1899}
1900
769b0441 1901/**
48ead020 1902 * trace_vbprintk - write a binary message to the tracing buffer
769b0441
FW
1903 *
1904 */
40ce74f1 1905int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 1906{
e1112b4d 1907 struct ftrace_event_call *call = &event_bprint;
769b0441 1908 struct ring_buffer_event *event;
e77405ad 1909 struct ring_buffer *buffer;
769b0441 1910 struct trace_array *tr = &global_trace;
48ead020 1911 struct bprint_entry *entry;
769b0441 1912 unsigned long flags;
07d777fe
SR
1913 char *tbuffer;
1914 int len = 0, size, pc;
769b0441
FW
1915
1916 if (unlikely(tracing_selftest_running || tracing_disabled))
1917 return 0;
1918
1919 /* Don't pollute graph traces with trace_vprintk internals */
1920 pause_graph_tracing();
1921
1922 pc = preempt_count();
5168ae50 1923 preempt_disable_notrace();
769b0441 1924
07d777fe
SR
1925 tbuffer = get_trace_buf();
1926 if (!tbuffer) {
1927 len = 0;
769b0441 1928 goto out;
07d777fe 1929 }
769b0441 1930
07d777fe 1931 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 1932
07d777fe
SR
1933 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1934 goto out;
769b0441 1935
07d777fe 1936 local_save_flags(flags);
769b0441 1937 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 1938 buffer = tr->trace_buffer.buffer;
e77405ad
SR
1939 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1940 flags, pc);
769b0441 1941 if (!event)
07d777fe 1942 goto out;
769b0441
FW
1943 entry = ring_buffer_event_data(event);
1944 entry->ip = ip;
769b0441
FW
1945 entry->fmt = fmt;
1946
07d777fe 1947 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
d931369b 1948 if (!filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 1949 __buffer_unlock_commit(buffer, event);
d931369b
SR
1950 ftrace_trace_stack(buffer, flags, 6, pc);
1951 }
769b0441 1952
769b0441 1953out:
5168ae50 1954 preempt_enable_notrace();
769b0441
FW
1955 unpause_graph_tracing();
1956
1957 return len;
1958}
48ead020
FW
1959EXPORT_SYMBOL_GPL(trace_vbprintk);
1960
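/*
 * Sketch (assumed wrapper, not the in-tree caller; relies on the
 * trace_vbprintk() declaration already visible in this file): a
 * varargs front end forwards to trace_vbprintk() the same way the
 * trace_printk() fast path does when its format string is a
 * compile-time constant.
 */
static __printf(2, 3) int demo_trace_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}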
12883efb
SRRH
1961static int
1962__trace_array_vprintk(struct ring_buffer *buffer,
1963 unsigned long ip, const char *fmt, va_list args)
48ead020 1964{
e1112b4d 1965 struct ftrace_event_call *call = &event_print;
48ead020 1966 struct ring_buffer_event *event;
07d777fe 1967 int len = 0, size, pc;
48ead020 1968 struct print_entry *entry;
07d777fe
SR
1969 unsigned long flags;
1970 char *tbuffer;
48ead020
FW
1971
1972 if (tracing_disabled || tracing_selftest_running)
1973 return 0;
1974
07d777fe
SR
1975 /* Don't pollute graph traces with trace_vprintk internals */
1976 pause_graph_tracing();
1977
48ead020
FW
1978 pc = preempt_count();
1979 preempt_disable_notrace();
48ead020 1980
07d777fe
SR
1981
1982 tbuffer = get_trace_buf();
1983 if (!tbuffer) {
1984 len = 0;
48ead020 1985 goto out;
07d777fe 1986 }
48ead020 1987
07d777fe
SR
1988 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1989 if (len > TRACE_BUF_SIZE)
1990 goto out;
48ead020 1991
07d777fe 1992 local_save_flags(flags);
48ead020 1993 size = sizeof(*entry) + len + 1;
e77405ad 1994 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 1995 flags, pc);
48ead020 1996 if (!event)
07d777fe 1997 goto out;
48ead020 1998 entry = ring_buffer_event_data(event);
c13d2f7c 1999 entry->ip = ip;
48ead020 2000
07d777fe 2001 memcpy(&entry->buf, tbuffer, len);
c13d2f7c 2002 entry->buf[len] = '\0';
d931369b 2003 if (!filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2004 __buffer_unlock_commit(buffer, event);
07d777fe 2005 ftrace_trace_stack(buffer, flags, 6, pc);
d931369b 2006 }
48ead020
FW
2007 out:
2008 preempt_enable_notrace();
07d777fe 2009 unpause_graph_tracing();
48ead020
FW
2010
2011 return len;
2012}
659372d3 2013
12883efb
SRRH
2014int trace_array_vprintk(struct trace_array *tr,
2015 unsigned long ip, const char *fmt, va_list args)
2016{
2017 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2018}
2019
2020int trace_array_printk(struct trace_array *tr,
2021 unsigned long ip, const char *fmt, ...)
2022{
2023 int ret;
2024 va_list ap;
2025
2026 if (!(trace_flags & TRACE_ITER_PRINTK))
2027 return 0;
2028
2029 va_start(ap, fmt);
2030 ret = trace_array_vprintk(tr, ip, fmt, ap);
2031 va_end(ap);
2032 return ret;
2033}
2034
2035int trace_array_printk_buf(struct ring_buffer *buffer,
2036 unsigned long ip, const char *fmt, ...)
2037{
2038 int ret;
2039 va_list ap;
2040
2041 if (!(trace_flags & TRACE_ITER_PRINTK))
2042 return 0;
2043
2044 va_start(ap, fmt);
2045 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2046 va_end(ap);
2047 return ret;
2048}
2049
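/*
 * Usage sketch (assumes the caller already owns a struct trace_array,
 * e.g. a tracing instance; not code from this file): printk-style
 * output can be steered at that instance's buffer instead of the
 * global one.
 */
static void demo_report_state(struct trace_array *tr, int state)
{
	trace_array_printk(tr, _THIS_IP_, "state changed to %d\n", state);
}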
659372d3
SR
2050int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2051{
a813a159 2052 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2053}
769b0441
FW
2054EXPORT_SYMBOL_GPL(trace_vprintk);
2055
e2ac8ef5 2056static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2057{
6d158a81
SR
2058 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2059
5a90f577 2060 iter->idx++;
6d158a81
SR
2061 if (buf_iter)
2062 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2063}
2064
e309b41d 2065static struct trace_entry *
bc21b478
SR
2066peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2067 unsigned long *lost_events)
dd0e545f 2068{
3928a8a2 2069 struct ring_buffer_event *event;
6d158a81 2070 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2071
d769041f
SR
2072 if (buf_iter)
2073 event = ring_buffer_iter_peek(buf_iter, ts);
2074 else
12883efb 2075 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2076 lost_events);
d769041f 2077
4a9bd3f1
SR
2078 if (event) {
2079 iter->ent_size = ring_buffer_event_length(event);
2080 return ring_buffer_event_data(event);
2081 }
2082 iter->ent_size = 0;
2083 return NULL;
dd0e545f 2084}
d769041f 2085
dd0e545f 2086static struct trace_entry *
bc21b478
SR
2087__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2088 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2089{
12883efb 2090 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2091 struct trace_entry *ent, *next = NULL;
aa27497c 2092 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2093 int cpu_file = iter->cpu_file;
3928a8a2 2094 u64 next_ts = 0, ts;
bc0c38d1 2095 int next_cpu = -1;
12b5da34 2096 int next_size = 0;
bc0c38d1
SR
2097 int cpu;
2098
b04cc6b1
FW
2099 /*
2100	 * If we are in a per_cpu trace file, don't bother iterating over
2101	 * all CPUs; just peek at that one directly.
2102 */
ae3b5093 2103 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2104 if (ring_buffer_empty_cpu(buffer, cpu_file))
2105 return NULL;
bc21b478 2106 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2107 if (ent_cpu)
2108 *ent_cpu = cpu_file;
2109
2110 return ent;
2111 }
2112
ab46428c 2113 for_each_tracing_cpu(cpu) {
dd0e545f 2114
3928a8a2
SR
2115 if (ring_buffer_empty_cpu(buffer, cpu))
2116 continue;
dd0e545f 2117
bc21b478 2118 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2119
cdd31cd2
IM
2120 /*
2121 * Pick the entry with the smallest timestamp:
2122 */
3928a8a2 2123 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2124 next = ent;
2125 next_cpu = cpu;
3928a8a2 2126 next_ts = ts;
bc21b478 2127 next_lost = lost_events;
12b5da34 2128 next_size = iter->ent_size;
bc0c38d1
SR
2129 }
2130 }
2131
12b5da34
SR
2132 iter->ent_size = next_size;
2133
bc0c38d1
SR
2134 if (ent_cpu)
2135 *ent_cpu = next_cpu;
2136
3928a8a2
SR
2137 if (ent_ts)
2138 *ent_ts = next_ts;
2139
bc21b478
SR
2140 if (missing_events)
2141 *missing_events = next_lost;
2142
bc0c38d1
SR
2143 return next;
2144}
2145
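/*
 * Illustrative sketch of the merge rule above (not in trace.c): given
 * one candidate timestamp per CPU, the entry reported next is always
 * the globally smallest one, which is how per-cpu buffers that are
 * each ordered independently appear as one time-ordered stream.
 */
static int demo_pick_next_cpu(const u64 *ts, const bool *nonempty, int ncpus)
{
	int cpu, best = -1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!nonempty[cpu])
			continue;
		if (best < 0 || ts[cpu] < ts[best])
			best = cpu;
	}
	return best;	/* -1 when every per-cpu buffer is empty */
}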
dd0e545f 2146/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2147struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2148 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2149{
bc21b478 2150 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2151}
2152
2153/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2154void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2155{
bc21b478
SR
2156 iter->ent = __find_next_entry(iter, &iter->cpu,
2157 &iter->lost_events, &iter->ts);
dd0e545f 2158
3928a8a2 2159 if (iter->ent)
e2ac8ef5 2160 trace_iterator_increment(iter);
dd0e545f 2161
3928a8a2 2162 return iter->ent ? iter : NULL;
b3806b43 2163}
bc0c38d1 2164
e309b41d 2165static void trace_consume(struct trace_iterator *iter)
b3806b43 2166{
12883efb 2167 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2168 &iter->lost_events);
bc0c38d1
SR
2169}
2170
e309b41d 2171static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2172{
2173 struct trace_iterator *iter = m->private;
bc0c38d1 2174 int i = (int)*pos;
4e3c3333 2175 void *ent;
bc0c38d1 2176
a63ce5b3
SR
2177 WARN_ON_ONCE(iter->leftover);
2178
bc0c38d1
SR
2179 (*pos)++;
2180
2181 /* can't go backwards */
2182 if (iter->idx > i)
2183 return NULL;
2184
2185 if (iter->idx < 0)
955b61e5 2186 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2187 else
2188 ent = iter;
2189
2190 while (ent && iter->idx < i)
955b61e5 2191 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2192
2193 iter->pos = *pos;
2194
bc0c38d1
SR
2195 return ent;
2196}
2197
955b61e5 2198void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2199{
2f26ebd5
SR
2200 struct ring_buffer_event *event;
2201 struct ring_buffer_iter *buf_iter;
2202 unsigned long entries = 0;
2203 u64 ts;
2204
12883efb 2205 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2206
6d158a81
SR
2207 buf_iter = trace_buffer_iter(iter, cpu);
2208 if (!buf_iter)
2f26ebd5
SR
2209 return;
2210
2f26ebd5
SR
2211 ring_buffer_iter_reset(buf_iter);
2212
2213 /*
2214	 * With the max latency tracers, a reset may never have taken
2215	 * place on a cpu. This is evident from the timestamp being
2216	 * before the start of the buffer.
2217 */
2218 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2219 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2220 break;
2221 entries++;
2222 ring_buffer_read(buf_iter, NULL);
2223 }
2224
12883efb 2225 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2226}
2227
d7350c3f 2228/*
d7350c3f
FW
2229 * The current tracer is copied to avoid taking a global lock
2230 * all around.
2231 */
bc0c38d1
SR
2232static void *s_start(struct seq_file *m, loff_t *pos)
2233{
2234 struct trace_iterator *iter = m->private;
2b6080f2 2235 struct trace_array *tr = iter->tr;
b04cc6b1 2236 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2237 void *p = NULL;
2238 loff_t l = 0;
3928a8a2 2239 int cpu;
bc0c38d1 2240
2fd196ec
HT
2241 /*
2242 * copy the tracer to avoid using a global lock all around.
2243 * iter->trace is a copy of current_trace, the pointer to the
2244 * name may be used instead of a strcmp(), as iter->trace->name
2245 * will point to the same string as current_trace->name.
2246 */
bc0c38d1 2247 mutex_lock(&trace_types_lock);
2b6080f2
SR
2248 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2249 *iter->trace = *tr->current_trace;
d7350c3f 2250 mutex_unlock(&trace_types_lock);
bc0c38d1 2251
12883efb 2252#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2253 if (iter->snapshot && iter->trace->use_max_tr)
2254 return ERR_PTR(-EBUSY);
12883efb 2255#endif
debdd57f
HT
2256
2257 if (!iter->snapshot)
2258 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2259
bc0c38d1
SR
2260 if (*pos != iter->pos) {
2261 iter->ent = NULL;
2262 iter->cpu = 0;
2263 iter->idx = -1;
2264
ae3b5093 2265 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2266 for_each_tracing_cpu(cpu)
2f26ebd5 2267 tracing_iter_reset(iter, cpu);
b04cc6b1 2268 } else
2f26ebd5 2269 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2270
ac91d854 2271 iter->leftover = 0;
bc0c38d1
SR
2272 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2273 ;
2274
2275 } else {
a63ce5b3
SR
2276 /*
2277 * If we overflowed the seq_file before, then we want
2278 * to just reuse the trace_seq buffer again.
2279 */
2280 if (iter->leftover)
2281 p = iter;
2282 else {
2283 l = *pos - 1;
2284 p = s_next(m, p, &l);
2285 }
bc0c38d1
SR
2286 }
2287
4f535968 2288 trace_event_read_lock();
7e53bd42 2289 trace_access_lock(cpu_file);
bc0c38d1
SR
2290 return p;
2291}
2292
2293static void s_stop(struct seq_file *m, void *p)
2294{
7e53bd42
LJ
2295 struct trace_iterator *iter = m->private;
2296
12883efb 2297#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2298 if (iter->snapshot && iter->trace->use_max_tr)
2299 return;
12883efb 2300#endif
debdd57f
HT
2301
2302 if (!iter->snapshot)
2303 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2304
7e53bd42 2305 trace_access_unlock(iter->cpu_file);
4f535968 2306 trace_event_read_unlock();
bc0c38d1
SR
2307}
2308
39eaf7ef 2309static void
12883efb
SRRH
2310get_total_entries(struct trace_buffer *buf,
2311 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2312{
2313 unsigned long count;
2314 int cpu;
2315
2316 *total = 0;
2317 *entries = 0;
2318
2319 for_each_tracing_cpu(cpu) {
12883efb 2320 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2321 /*
2322 * If this buffer has skipped entries, then we hold all
2323 * entries for the trace and we need to ignore the
2324		 * ones before the timestamp.
2325 */
12883efb
SRRH
2326 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2327 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2328 /* total is the same as the entries */
2329 *total += count;
2330 } else
2331 *total += count +
12883efb 2332 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2333 *entries += count;
2334 }
2335}
2336
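/*
 * Worked example for the accounting above (made-up numbers): a CPU
 * whose buffer currently holds 1000 entries and has overwritten 250
 * older ones contributes entries += 1000 and total += 1250; a CPU
 * with skipped_entries set (trimmed max-latency data) contributes its
 * trimmed count to both sums.
 */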
e309b41d 2337static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2338{
a6168353
ME
2339 seq_puts(m, "# _------=> CPU# \n");
2340 seq_puts(m, "# / _-----=> irqs-off \n");
2341 seq_puts(m, "# | / _----=> need-resched \n");
2342 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2343 seq_puts(m, "# ||| / _--=> preempt-depth \n");
e6e1e259
SR
2344 seq_puts(m, "# |||| / delay \n");
2345 seq_puts(m, "# cmd pid ||||| time | caller \n");
2346 seq_puts(m, "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2347}
2348
12883efb 2349static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2350{
39eaf7ef
SR
2351 unsigned long total;
2352 unsigned long entries;
2353
12883efb 2354 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2355 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2356 entries, total, num_online_cpus());
2357 seq_puts(m, "#\n");
2358}
2359
12883efb 2360static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2361{
12883efb 2362 print_event_info(buf, m);
77271ce4 2363 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
a6168353 2364 seq_puts(m, "# | | | | |\n");
bc0c38d1
SR
2365}
2366
12883efb 2367static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2368{
12883efb 2369 print_event_info(buf, m);
77271ce4
SR
2370 seq_puts(m, "# _-----=> irqs-off\n");
2371 seq_puts(m, "# / _----=> need-resched\n");
2372 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2373 seq_puts(m, "# || / _--=> preempt-depth\n");
2374 seq_puts(m, "# ||| / delay\n");
2375 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2376 seq_puts(m, "# | | | |||| | |\n");
2377}
bc0c38d1 2378
62b915f1 2379void
bc0c38d1
SR
2380print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2381{
2382 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2383 struct trace_buffer *buf = iter->trace_buffer;
2384 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2385 struct tracer *type = iter->trace;
39eaf7ef
SR
2386 unsigned long entries;
2387 unsigned long total;
bc0c38d1
SR
2388 const char *name = "preemption";
2389
d840f718 2390 name = type->name;
bc0c38d1 2391
12883efb 2392 get_total_entries(buf, &total, &entries);
bc0c38d1 2393
888b55dc 2394 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2395 name, UTS_RELEASE);
888b55dc 2396 seq_puts(m, "# -----------------------------------"
bc0c38d1 2397 "---------------------------------\n");
888b55dc 2398 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2399 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2400 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2401 entries,
4c11d7ae 2402 total,
12883efb 2403 buf->cpu,
bc0c38d1
SR
2404#if defined(CONFIG_PREEMPT_NONE)
2405 "server",
2406#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2407 "desktop",
b5c21b45 2408#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2409 "preempt",
2410#else
2411 "unknown",
2412#endif
2413 /* These are reserved for later use */
2414 0, 0, 0, 0);
2415#ifdef CONFIG_SMP
2416 seq_printf(m, " #P:%d)\n", num_online_cpus());
2417#else
2418 seq_puts(m, ")\n");
2419#endif
888b55dc
KM
2420 seq_puts(m, "# -----------------\n");
2421 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2422 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2423 data->comm, data->pid,
2424 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2425 data->policy, data->rt_priority);
888b55dc 2426 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2427
2428 if (data->critical_start) {
888b55dc 2429 seq_puts(m, "# => started at: ");
214023c3
SR
2430 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2431 trace_print_seq(m, &iter->seq);
888b55dc 2432 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2433 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2434 trace_print_seq(m, &iter->seq);
8248ac05 2435 seq_puts(m, "\n#\n");
bc0c38d1
SR
2436 }
2437
888b55dc 2438 seq_puts(m, "#\n");
bc0c38d1
SR
2439}
2440
a309720c
SR
2441static void test_cpu_buff_start(struct trace_iterator *iter)
2442{
2443 struct trace_seq *s = &iter->seq;
2444
12ef7d44
SR
2445 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2446 return;
2447
2448 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2449 return;
2450
4462344e 2451 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2452 return;
2453
12883efb 2454 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2455 return;
2456
4462344e 2457 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2458
2459 /* Don't print started cpu buffer for the first entry of the trace */
2460 if (iter->idx > 1)
2461 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2462 iter->cpu);
a309720c
SR
2463}
2464
2c4f035f 2465static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2466{
214023c3 2467 struct trace_seq *s = &iter->seq;
bc0c38d1 2468 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2469 struct trace_entry *entry;
f633cef0 2470 struct trace_event *event;
bc0c38d1 2471
4e3c3333 2472 entry = iter->ent;
dd0e545f 2473
a309720c
SR
2474 test_cpu_buff_start(iter);
2475
c4a8e8be 2476 event = ftrace_find_event(entry->type);
bc0c38d1 2477
c4a8e8be 2478 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
27d48be8
SR
2479 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2480 if (!trace_print_lat_context(iter))
2481 goto partial;
2482 } else {
2483 if (!trace_print_context(iter))
2484 goto partial;
2485 }
c4a8e8be 2486 }
bc0c38d1 2487
268ccda0 2488 if (event)
a9a57763 2489 return event->funcs->trace(iter, sym_flags, event);
d9793bd8
ACM
2490
2491 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2492 goto partial;
02b67518 2493
2c4f035f 2494 return TRACE_TYPE_HANDLED;
d9793bd8
ACM
2495partial:
2496 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1
SR
2497}
2498
2c4f035f 2499static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2500{
2501 struct trace_seq *s = &iter->seq;
2502 struct trace_entry *entry;
f633cef0 2503 struct trace_event *event;
f9896bf3
IM
2504
2505 entry = iter->ent;
dd0e545f 2506
c4a8e8be 2507 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
d9793bd8
ACM
2508 if (!trace_seq_printf(s, "%d %d %llu ",
2509 entry->pid, iter->cpu, iter->ts))
2510 goto partial;
c4a8e8be 2511 }
f9896bf3 2512
f633cef0 2513 event = ftrace_find_event(entry->type);
268ccda0 2514 if (event)
a9a57763 2515 return event->funcs->raw(iter, 0, event);
d9793bd8
ACM
2516
2517 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2518 goto partial;
777e208d 2519
2c4f035f 2520 return TRACE_TYPE_HANDLED;
d9793bd8
ACM
2521partial:
2522 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3
IM
2523}
2524
2c4f035f 2525static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2526{
2527 struct trace_seq *s = &iter->seq;
2528 unsigned char newline = '\n';
2529 struct trace_entry *entry;
f633cef0 2530 struct trace_event *event;
5e3ca0ec
IM
2531
2532 entry = iter->ent;
dd0e545f 2533
c4a8e8be
FW
2534 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2535 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2536 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2537 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2538 }
5e3ca0ec 2539
f633cef0 2540 event = ftrace_find_event(entry->type);
268ccda0 2541 if (event) {
a9a57763 2542 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2543 if (ret != TRACE_TYPE_HANDLED)
2544 return ret;
2545 }
7104f300 2546
5e3ca0ec
IM
2547 SEQ_PUT_FIELD_RET(s, newline);
2548
2c4f035f 2549 return TRACE_TYPE_HANDLED;
5e3ca0ec
IM
2550}
2551
2c4f035f 2552static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2553{
2554 struct trace_seq *s = &iter->seq;
2555 struct trace_entry *entry;
f633cef0 2556 struct trace_event *event;
cb0f12aa
IM
2557
2558 entry = iter->ent;
dd0e545f 2559
c4a8e8be
FW
2560 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2561 SEQ_PUT_FIELD_RET(s, entry->pid);
1830b52d 2562 SEQ_PUT_FIELD_RET(s, iter->cpu);
c4a8e8be
FW
2563 SEQ_PUT_FIELD_RET(s, iter->ts);
2564 }
cb0f12aa 2565
f633cef0 2566 event = ftrace_find_event(entry->type);
a9a57763
SR
2567 return event ? event->funcs->binary(iter, 0, event) :
2568 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2569}
2570
62b915f1 2571int trace_empty(struct trace_iterator *iter)
bc0c38d1 2572{
6d158a81 2573 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2574 int cpu;
2575
9aba60fe 2576 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2577 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2578 cpu = iter->cpu_file;
6d158a81
SR
2579 buf_iter = trace_buffer_iter(iter, cpu);
2580 if (buf_iter) {
2581 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2582 return 0;
2583 } else {
12883efb 2584 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2585 return 0;
2586 }
2587 return 1;
2588 }
2589
ab46428c 2590 for_each_tracing_cpu(cpu) {
6d158a81
SR
2591 buf_iter = trace_buffer_iter(iter, cpu);
2592 if (buf_iter) {
2593 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2594 return 0;
2595 } else {
12883efb 2596 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2597 return 0;
2598 }
bc0c38d1 2599 }
d769041f 2600
797d3712 2601 return 1;
bc0c38d1
SR
2602}
2603
4f535968 2604/* Called with trace_event_read_lock() held. */
955b61e5 2605enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2606{
2c4f035f
FW
2607 enum print_line_t ret;
2608
ee5e51f5
JO
2609 if (iter->lost_events &&
2610 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2611 iter->cpu, iter->lost_events))
2612 return TRACE_TYPE_PARTIAL_LINE;
bc21b478 2613
2c4f035f
FW
2614 if (iter->trace && iter->trace->print_line) {
2615 ret = iter->trace->print_line(iter);
2616 if (ret != TRACE_TYPE_UNHANDLED)
2617 return ret;
2618 }
72829bc3 2619
09ae7234
SRRH
2620 if (iter->ent->type == TRACE_BPUTS &&
2621 trace_flags & TRACE_ITER_PRINTK &&
2622 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2623 return trace_print_bputs_msg_only(iter);
2624
48ead020
FW
2625 if (iter->ent->type == TRACE_BPRINT &&
2626 trace_flags & TRACE_ITER_PRINTK &&
2627 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2628 return trace_print_bprintk_msg_only(iter);
48ead020 2629
66896a85
FW
2630 if (iter->ent->type == TRACE_PRINT &&
2631 trace_flags & TRACE_ITER_PRINTK &&
2632 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2633 return trace_print_printk_msg_only(iter);
66896a85 2634
cb0f12aa
IM
2635 if (trace_flags & TRACE_ITER_BIN)
2636 return print_bin_fmt(iter);
2637
5e3ca0ec
IM
2638 if (trace_flags & TRACE_ITER_HEX)
2639 return print_hex_fmt(iter);
2640
f9896bf3
IM
2641 if (trace_flags & TRACE_ITER_RAW)
2642 return print_raw_fmt(iter);
2643
f9896bf3
IM
2644 return print_trace_fmt(iter);
2645}
2646
7e9a49ef
JO
2647void trace_latency_header(struct seq_file *m)
2648{
2649 struct trace_iterator *iter = m->private;
2650
2651 /* print nothing if the buffers are empty */
2652 if (trace_empty(iter))
2653 return;
2654
2655 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2656 print_trace_header(m, iter);
2657
2658 if (!(trace_flags & TRACE_ITER_VERBOSE))
2659 print_lat_help_header(m);
2660}
2661
62b915f1
JO
2662void trace_default_header(struct seq_file *m)
2663{
2664 struct trace_iterator *iter = m->private;
2665
f56e7f8e
JO
2666 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2667 return;
2668
62b915f1
JO
2669 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2670 /* print nothing if the buffers are empty */
2671 if (trace_empty(iter))
2672 return;
2673 print_trace_header(m, iter);
2674 if (!(trace_flags & TRACE_ITER_VERBOSE))
2675 print_lat_help_header(m);
2676 } else {
77271ce4
SR
2677 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2678 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2679 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2680 else
12883efb 2681 print_func_help_header(iter->trace_buffer, m);
77271ce4 2682 }
62b915f1
JO
2683 }
2684}
2685
e0a413f6
SR
2686static void test_ftrace_alive(struct seq_file *m)
2687{
2688 if (!ftrace_is_dead())
2689 return;
2690 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2691 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2692}
2693
d8741e2e 2694#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2695static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2696{
d8741e2e
SRRH
2697 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2698 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2699 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2700 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2701 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2702 seq_printf(m, "# is not a '0' or '1')\n");
2703}
f1affcaa
SRRH
2704
2705static void show_snapshot_percpu_help(struct seq_file *m)
2706{
2707 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2708#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2709 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2710 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2711#else
2712 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2713 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2714#endif
2715 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2716 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2717 seq_printf(m, "# is not a '0' or '1')\n");
2718}
2719
d8741e2e
SRRH
2720static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2721{
45ad21ca 2722 if (iter->tr->allocated_snapshot)
d8741e2e
SRRH
2723 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2724 else
2725 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2726
2727 seq_printf(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2728 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2729 show_snapshot_main_help(m);
2730 else
2731 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2732}
2733#else
2734/* Should never be called */
2735static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2736#endif
2737
bc0c38d1
SR
2738static int s_show(struct seq_file *m, void *v)
2739{
2740 struct trace_iterator *iter = v;
a63ce5b3 2741 int ret;
bc0c38d1
SR
2742
2743 if (iter->ent == NULL) {
2744 if (iter->tr) {
2745 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2746 seq_puts(m, "#\n");
e0a413f6 2747 test_ftrace_alive(m);
bc0c38d1 2748 }
d8741e2e
SRRH
2749 if (iter->snapshot && trace_empty(iter))
2750 print_snapshot_help(m, iter);
2751 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2752 iter->trace->print_header(m);
62b915f1
JO
2753 else
2754 trace_default_header(m);
2755
a63ce5b3
SR
2756 } else if (iter->leftover) {
2757 /*
2758 * If we filled the seq_file buffer earlier, we
2759 * want to just show it now.
2760 */
2761 ret = trace_print_seq(m, &iter->seq);
2762
2763 /* ret should this time be zero, but you never know */
2764 iter->leftover = ret;
2765
bc0c38d1 2766 } else {
f9896bf3 2767 print_trace_line(iter);
a63ce5b3
SR
2768 ret = trace_print_seq(m, &iter->seq);
2769 /*
2770 * If we overflow the seq_file buffer, then it will
2771 * ask us for this data again at start up.
2772 * Use that instead.
2773 * ret is 0 if seq_file write succeeded.
2774 * -1 otherwise.
2775 */
2776 iter->leftover = ret;
bc0c38d1
SR
2777 }
2778
2779 return 0;
2780}
2781
88e9d34c 2782static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
2783 .start = s_start,
2784 .next = s_next,
2785 .stop = s_stop,
2786 .show = s_show,
bc0c38d1
SR
2787};
2788
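/*
 * Minimal seq_file sketch (hypothetical, not in trace.c) of the
 * contract the operations above implement: start() positions the
 * cursor at *pos, next() advances it, show() emits one record and
 * stop() drops any locks. tracer_seq_ops also needs per-open iterator
 * state, which is why __tracing_open() below pairs it with
 * __seq_open_private() rather than plain seq_open().
 */
static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;		/* single-record file */
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};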
e309b41d 2789static struct trace_iterator *
debdd57f 2790__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 2791{
2b6080f2
SR
2792 struct trace_cpu *tc = inode->i_private;
2793 struct trace_array *tr = tc->tr;
bc0c38d1 2794 struct trace_iterator *iter;
50e18b94 2795 int cpu;
bc0c38d1 2796
85a2f9b4
SR
2797 if (tracing_disabled)
2798 return ERR_PTR(-ENODEV);
60a11774 2799
50e18b94 2800 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
2801 if (!iter)
2802 return ERR_PTR(-ENOMEM);
bc0c38d1 2803
6d158a81
SR
2804 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2805 GFP_KERNEL);
93574fcc
DC
2806 if (!iter->buffer_iter)
2807 goto release;
2808
d7350c3f
FW
2809 /*
2810 * We make a copy of the current tracer to avoid concurrent
2811 * changes on it while we are reading.
2812 */
bc0c38d1 2813 mutex_lock(&trace_types_lock);
d7350c3f 2814 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 2815 if (!iter->trace)
d7350c3f 2816 goto fail;
85a2f9b4 2817
2b6080f2 2818 *iter->trace = *tr->current_trace;
d7350c3f 2819
79f55997 2820 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
2821 goto fail;
2822
12883efb
SRRH
2823 iter->tr = tr;
2824
2825#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
2826 /* Currently only the top directory has a snapshot */
2827 if (tr->current_trace->print_max || snapshot)
12883efb 2828 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 2829 else
12883efb
SRRH
2830#endif
2831 iter->trace_buffer = &tr->trace_buffer;
debdd57f 2832 iter->snapshot = snapshot;
bc0c38d1 2833 iter->pos = -1;
d7350c3f 2834 mutex_init(&iter->mutex);
2b6080f2 2835 iter->cpu_file = tc->cpu;
bc0c38d1 2836
8bba1bf5
MM
2837 /* Notify the tracer early; before we stop tracing. */
2838 if (iter->trace && iter->trace->open)
a93751ca 2839 iter->trace->open(iter);
8bba1bf5 2840
12ef7d44 2841 /* Annotate start of buffers if we had overruns */
12883efb 2842 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
2843 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2844
8be0709f
DS
2845 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2846 if (trace_clocks[trace_clock_id].in_ns)
2847 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2848
debdd57f
HT
2849 /* stop the trace while dumping if we are not opening "snapshot" */
2850 if (!iter->snapshot)
2b6080f2 2851 tracing_stop_tr(tr);
2f26ebd5 2852
ae3b5093 2853 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2854 for_each_tracing_cpu(cpu) {
b04cc6b1 2855 iter->buffer_iter[cpu] =
12883efb 2856 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
2857 }
2858 ring_buffer_read_prepare_sync();
2859 for_each_tracing_cpu(cpu) {
2860 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 2861 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
2862 }
2863 } else {
2864 cpu = iter->cpu_file;
3928a8a2 2865 iter->buffer_iter[cpu] =
12883efb 2866 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
2867 ring_buffer_read_prepare_sync();
2868 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 2869 tracing_iter_reset(iter, cpu);
3928a8a2
SR
2870 }
2871
a695cb58
SRRH
2872 tr->ref++;
2873
bc0c38d1
SR
2874 mutex_unlock(&trace_types_lock);
2875
bc0c38d1 2876 return iter;
3928a8a2 2877
d7350c3f 2878 fail:
3928a8a2 2879 mutex_unlock(&trace_types_lock);
d7350c3f 2880 kfree(iter->trace);
6d158a81 2881 kfree(iter->buffer_iter);
93574fcc 2882release:
50e18b94
JO
2883 seq_release_private(inode, file);
2884 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
2885}
2886
2887int tracing_open_generic(struct inode *inode, struct file *filp)
2888{
60a11774
SR
2889 if (tracing_disabled)
2890 return -ENODEV;
2891
bc0c38d1
SR
2892 filp->private_data = inode->i_private;
2893 return 0;
2894}
2895
4fd27358 2896static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 2897{
907f2784 2898 struct seq_file *m = file->private_data;
4acd4d00 2899 struct trace_iterator *iter;
2b6080f2 2900 struct trace_array *tr;
3928a8a2 2901 int cpu;
bc0c38d1 2902
4acd4d00
SR
2903 if (!(file->f_mode & FMODE_READ))
2904 return 0;
2905
2906 iter = m->private;
12883efb 2907 tr = iter->tr;
4acd4d00 2908
bc0c38d1 2909 mutex_lock(&trace_types_lock);
a695cb58
SRRH
2910
2911 WARN_ON(!tr->ref);
2912 tr->ref--;
2913
3928a8a2
SR
2914 for_each_tracing_cpu(cpu) {
2915 if (iter->buffer_iter[cpu])
2916 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2917 }
2918
bc0c38d1
SR
2919 if (iter->trace && iter->trace->close)
2920 iter->trace->close(iter);
2921
debdd57f
HT
2922 if (!iter->snapshot)
2923 /* reenable tracing if it was previously enabled */
2b6080f2 2924 tracing_start_tr(tr);
bc0c38d1
SR
2925 mutex_unlock(&trace_types_lock);
2926
d7350c3f 2927 mutex_destroy(&iter->mutex);
b0dfa978 2928 free_cpumask_var(iter->started);
d7350c3f 2929 kfree(iter->trace);
6d158a81 2930 kfree(iter->buffer_iter);
50e18b94 2931 seq_release_private(inode, file);
bc0c38d1
SR
2932 return 0;
2933}
2934
2935static int tracing_open(struct inode *inode, struct file *file)
2936{
85a2f9b4
SR
2937 struct trace_iterator *iter;
2938 int ret = 0;
bc0c38d1 2939
4acd4d00
SR
2940 /* If this file was open for write, then erase contents */
2941 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 2942 (file->f_flags & O_TRUNC)) {
2b6080f2
SR
2943 struct trace_cpu *tc = inode->i_private;
2944 struct trace_array *tr = tc->tr;
bc0c38d1 2945
2b6080f2 2946 if (tc->cpu == RING_BUFFER_ALL_CPUS)
12883efb 2947 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 2948 else
12883efb 2949 tracing_reset(&tr->trace_buffer, tc->cpu);
4acd4d00 2950 }
bc0c38d1 2951
4acd4d00 2952 if (file->f_mode & FMODE_READ) {
debdd57f 2953 iter = __tracing_open(inode, file, false);
4acd4d00
SR
2954 if (IS_ERR(iter))
2955 ret = PTR_ERR(iter);
2956 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2957 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2958 }
bc0c38d1
SR
2959 return ret;
2960}
2961
e309b41d 2962static void *
bc0c38d1
SR
2963t_next(struct seq_file *m, void *v, loff_t *pos)
2964{
f129e965 2965 struct tracer *t = v;
bc0c38d1
SR
2966
2967 (*pos)++;
2968
2969 if (t)
2970 t = t->next;
2971
bc0c38d1
SR
2972 return t;
2973}
2974
2975static void *t_start(struct seq_file *m, loff_t *pos)
2976{
f129e965 2977 struct tracer *t;
bc0c38d1
SR
2978 loff_t l = 0;
2979
2980 mutex_lock(&trace_types_lock);
f129e965 2981 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
bc0c38d1
SR
2982 ;
2983
2984 return t;
2985}
2986
2987static void t_stop(struct seq_file *m, void *p)
2988{
2989 mutex_unlock(&trace_types_lock);
2990}
2991
2992static int t_show(struct seq_file *m, void *v)
2993{
2994 struct tracer *t = v;
2995
2996 if (!t)
2997 return 0;
2998
2999 seq_printf(m, "%s", t->name);
3000 if (t->next)
3001 seq_putc(m, ' ');
3002 else
3003 seq_putc(m, '\n');
3004
3005 return 0;
3006}
3007
88e9d34c 3008static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3009 .start = t_start,
3010 .next = t_next,
3011 .stop = t_stop,
3012 .show = t_show,
bc0c38d1
SR
3013};
3014
3015static int show_traces_open(struct inode *inode, struct file *file)
3016{
60a11774
SR
3017 if (tracing_disabled)
3018 return -ENODEV;
3019
f129e965 3020 return seq_open(file, &show_traces_seq_ops);
bc0c38d1
SR
3021}
3022
4acd4d00
SR
3023static ssize_t
3024tracing_write_stub(struct file *filp, const char __user *ubuf,
3025 size_t count, loff_t *ppos)
3026{
3027 return count;
3028}
3029
364829b1
SP
3030static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
3031{
3032 if (file->f_mode & FMODE_READ)
3033 return seq_lseek(file, offset, origin);
3034 else
3035 return 0;
3036}
3037
5e2336a0 3038static const struct file_operations tracing_fops = {
4bf39a94
IM
3039 .open = tracing_open,
3040 .read = seq_read,
4acd4d00 3041 .write = tracing_write_stub,
364829b1 3042 .llseek = tracing_seek,
4bf39a94 3043 .release = tracing_release,
bc0c38d1
SR
3044};
3045
5e2336a0 3046static const struct file_operations show_traces_fops = {
c7078de1
IM
3047 .open = show_traces_open,
3048 .read = seq_read,
3049 .release = seq_release,
b444786f 3050 .llseek = seq_lseek,
c7078de1
IM
3051};
3052
36dfe925
IM
3053/*
3054 * Only trace on a CPU if the bitmask is set:
3055 */
9e01c1b7 3056static cpumask_var_t tracing_cpumask;
36dfe925
IM
3057
3058/*
3059 * The tracer itself will not take this lock, but still we want
3060 * to provide a consistent cpumask to user-space:
3061 */
3062static DEFINE_MUTEX(tracing_cpumask_update_lock);
3063
3064/*
3065 * Temporary storage for the character representation of the
3066 * CPU bitmask (and one more byte for the newline):
3067 */
3068static char mask_str[NR_CPUS + 1];
3069
c7078de1
IM
3070static ssize_t
3071tracing_cpumask_read(struct file *filp, char __user *ubuf,
3072 size_t count, loff_t *ppos)
3073{
36dfe925 3074 int len;
c7078de1
IM
3075
3076 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3077
9e01c1b7 3078 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
36dfe925
IM
3079 if (count - len < 2) {
3080 count = -EINVAL;
3081 goto out_err;
3082 }
3083 len += sprintf(mask_str + len, "\n");
3084 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3085
3086out_err:
c7078de1
IM
3087 mutex_unlock(&tracing_cpumask_update_lock);
3088
3089 return count;
3090}
3091
3092static ssize_t
3093tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3094 size_t count, loff_t *ppos)
3095{
2b6080f2 3096 struct trace_array *tr = filp->private_data;
9e01c1b7 3097 cpumask_var_t tracing_cpumask_new;
2b6080f2 3098 int err, cpu;
9e01c1b7
RR
3099
3100 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3101 return -ENOMEM;
c7078de1 3102
9e01c1b7 3103 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3104 if (err)
36dfe925
IM
3105 goto err_unlock;
3106
215368e8
LZ
3107 mutex_lock(&tracing_cpumask_update_lock);
3108
a5e25883 3109 local_irq_disable();
0199c4e6 3110 arch_spin_lock(&ftrace_max_lock);
ab46428c 3111 for_each_tracing_cpu(cpu) {
36dfe925
IM
3112 /*
3113 * Increase/decrease the disabled counter if we are
3114 * about to flip a bit in the cpumask:
3115 */
9e01c1b7
RR
3116 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3117 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3118 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3119 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3120 }
9e01c1b7
RR
3121 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3122 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3123 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3124 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3125 }
3126 }
0199c4e6 3127 arch_spin_unlock(&ftrace_max_lock);
a5e25883 3128 local_irq_enable();
36dfe925 3129
9e01c1b7 3130 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3131
3132 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3133 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3134
3135 return count;
36dfe925
IM
3136
3137err_unlock:
215368e8 3138 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3139
3140 return err;
c7078de1
IM
3141}
3142
5e2336a0 3143static const struct file_operations tracing_cpumask_fops = {
c7078de1
IM
3144 .open = tracing_open_generic,
3145 .read = tracing_cpumask_read,
3146 .write = tracing_cpumask_write,
b444786f 3147 .llseek = generic_file_llseek,
bc0c38d1
SR
3148};
3149
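/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	3
 *
 * restricts tracing to CPUs 0 and 1. The write path above disables
 * per-cpu recording for CPUs leaving the mask and re-enables it for
 * CPUs entering it, so CPUs that stay selected are untouched.
 */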
fdb372ed 3150static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3151{
d8e83d26 3152 struct tracer_opt *trace_opts;
2b6080f2 3153 struct trace_array *tr = m->private;
d8e83d26 3154 u32 tracer_flags;
d8e83d26 3155 int i;
adf9f195 3156
d8e83d26 3157 mutex_lock(&trace_types_lock);
2b6080f2
SR
3158 tracer_flags = tr->current_trace->flags->val;
3159 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3160
bc0c38d1
SR
3161 for (i = 0; trace_options[i]; i++) {
3162 if (trace_flags & (1 << i))
fdb372ed 3163 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3164 else
fdb372ed 3165 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3166 }
3167
adf9f195
FW
3168 for (i = 0; trace_opts[i].name; i++) {
3169 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3170 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3171 else
fdb372ed 3172 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3173 }
d8e83d26 3174 mutex_unlock(&trace_types_lock);
adf9f195 3175
fdb372ed 3176 return 0;
bc0c38d1 3177}
bc0c38d1 3178
8d18eaaf
LZ
3179static int __set_tracer_option(struct tracer *trace,
3180 struct tracer_flags *tracer_flags,
3181 struct tracer_opt *opts, int neg)
3182{
3183 int ret;
bc0c38d1 3184
8d18eaaf
LZ
3185 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3186 if (ret)
3187 return ret;
3188
3189 if (neg)
3190 tracer_flags->val &= ~opts->bit;
3191 else
3192 tracer_flags->val |= opts->bit;
3193 return 0;
bc0c38d1
SR
3194}
3195
adf9f195
FW
3196/* Try to assign a tracer specific option */
3197static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3198{
7770841e 3199 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3200 struct tracer_opt *opts = NULL;
8d18eaaf 3201 int i;
adf9f195 3202
7770841e
Z
3203 for (i = 0; tracer_flags->opts[i].name; i++) {
3204 opts = &tracer_flags->opts[i];
adf9f195 3205
8d18eaaf
LZ
3206 if (strcmp(cmp, opts->name) == 0)
3207 return __set_tracer_option(trace, trace->flags,
3208 opts, neg);
adf9f195 3209 }
adf9f195 3210
8d18eaaf 3211 return -EINVAL;
adf9f195
FW
3212}
3213
613f04a0
SRRH
3214/* Some tracers require overwrite to stay enabled */
3215int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3216{
3217 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3218 return -1;
3219
3220 return 0;
3221}
3222
2b6080f2 3223int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3224{
3225 /* do nothing if flag is already set */
3226 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3227 return 0;
3228
3229 /* Give the tracer a chance to approve the change */
2b6080f2
SR
3230 if (tr->current_trace->flag_changed)
3231 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
613f04a0 3232 return -EINVAL;
af4617bd
SR
3233
3234 if (enabled)
3235 trace_flags |= mask;
3236 else
3237 trace_flags &= ~mask;
e870e9a1
LZ
3238
3239 if (mask == TRACE_ITER_RECORD_CMD)
3240 trace_event_enable_cmd_record(enabled);
750912fa 3241
80902822 3242 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3243 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3244#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3245 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3246#endif
3247 }
81698831
SR
3248
3249 if (mask == TRACE_ITER_PRINTK)
3250 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3251
3252 return 0;
af4617bd
SR
3253}
3254
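/*
 * Illustrative calls (not from this file): toggling a core option is
 * a bit operation on the global trace_flags word,
 *
 *	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 *	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0);
 *
 * and each call is a no-op when the bit already has the requested
 * value; the current tracer may also veto the change via
 * ->flag_changed().
 */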
2b6080f2 3255static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3256{
8d18eaaf 3257 char *cmp;
bc0c38d1 3258 int neg = 0;
613f04a0 3259 int ret = -ENODEV;
bc0c38d1
SR
3260 int i;
3261
7bcfaf54 3262 cmp = strstrip(option);
bc0c38d1 3263
8d18eaaf 3264 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3265 neg = 1;
3266 cmp += 2;
3267 }
3268
69d34da2
SRRH
3269 mutex_lock(&trace_types_lock);
3270
bc0c38d1 3271 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3272 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3273 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3274 break;
3275 }
3276 }
adf9f195
FW
3277
3278 /* If no option could be set, test the specific tracer options */
69d34da2 3279 if (!trace_options[i])
2b6080f2 3280 ret = set_tracer_option(tr->current_trace, cmp, neg);
69d34da2
SRRH
3281
3282 mutex_unlock(&trace_types_lock);
bc0c38d1 3283
7bcfaf54
SR
3284 return ret;
3285}
3286
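/*
 * Example option strings accepted above (core flags first, with
 * tracer-specific flags as the fallback; values illustrative):
 *
 *	trace_set_options(tr, "print-parent");		sets a core flag
 *	trace_set_options(tr, "nooverwrite");		"no" prefix clears it
 *	trace_set_options(tr, "func_stack_trace");	handed to the tracer
 */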
3287static ssize_t
3288tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3289 size_t cnt, loff_t *ppos)
3290{
2b6080f2
SR
3291 struct seq_file *m = filp->private_data;
3292 struct trace_array *tr = m->private;
7bcfaf54 3293 char buf[64];
613f04a0 3294 int ret;
7bcfaf54
SR
3295
3296 if (cnt >= sizeof(buf))
3297 return -EINVAL;
3298
3299 if (copy_from_user(&buf, ubuf, cnt))
3300 return -EFAULT;
3301
a8dd2176
SR
3302 buf[cnt] = 0;
3303
2b6080f2 3304 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3305 if (ret < 0)
3306 return ret;
7bcfaf54 3307
cf8517cf 3308 *ppos += cnt;
bc0c38d1
SR
3309
3310 return cnt;
3311}
3312
fdb372ed
LZ
3313static int tracing_trace_options_open(struct inode *inode, struct file *file)
3314{
3315 if (tracing_disabled)
3316 return -ENODEV;
2b6080f2
SR
3317
3318 return single_open(file, tracing_trace_options_show, inode->i_private);
fdb372ed
LZ
3319}
3320
5e2336a0 3321static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3322 .open = tracing_trace_options_open,
3323 .read = seq_read,
3324 .llseek = seq_lseek,
3325 .release = single_release,
ee6bce52 3326 .write = tracing_trace_options_write,
bc0c38d1
SR
3327};
3328
7bd2f24c
IM
3329static const char readme_msg[] =
3330 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3331 "# echo 0 > tracing_on : quick way to disable tracing\n"
3332 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3333 " Important files:\n"
3334 " trace\t\t\t- The static contents of the buffer\n"
3335 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3336 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3337 " current_tracer\t- function and latency tracers\n"
3338 " available_tracers\t- list of configured tracers for current_tracer\n"
3339 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3340 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3341 " trace_clock\t\t-change the clock used to order events\n"
3342 " local: Per cpu clock but may not be synced across CPUs\n"
3343 " global: Synced across CPUs but slows tracing down.\n"
3344 " counter: Not a clock, but just an increment\n"
3345 " uptime: Jiffy counter from time of boot\n"
3346 " perf: Same clock that perf events use\n"
3347#ifdef CONFIG_X86_64
3348 " x86-tsc: TSC cycle counter\n"
3349#endif
3350 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3351 " tracing_cpumask\t- Limit which CPUs to trace\n"
3352 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3353 "\t\t\t Remove sub-buffer with rmdir\n"
3354 " trace_options\t\t- Set format or modify how tracing happens\n"
3355 "\t\t\t Disable an option by adding a suffix 'no' to the option name\n"
3356#ifdef CONFIG_DYNAMIC_FTRACE
3357 "\n available_filter_functions - list of functions that can be filtered on\n"
3358 " set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3359 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3360 " modules: Can select a group via module\n"
3361 " Format: :mod:<module-name>\n"
3362 " example: echo :mod:ext3 > set_ftrace_filter\n"
3363 " triggers: a command to perform when function is hit\n"
3364 " Format: <function>:<trigger>[:count]\n"
3365 " trigger: traceon, traceoff\n"
3366 " enable_event:<system>:<event>\n"
3367 " disable_event:<system>:<event>\n"
3368#ifdef CONFIG_STACKTRACE
3369 " stacktrace\n"
3370#endif
3371#ifdef CONFIG_TRACER_SNAPSHOT
3372 " snapshot\n"
3373#endif
3374 " example: echo do_fault:traceoff > set_ftrace_filter\n"
3375 " echo do_trap:traceoff:3 > set_ftrace_filter\n"
3376 " The first one will disable tracing every time do_fault is hit\n"
3377 " The second will disable tracing at most 3 times when do_trap is hit\n"
3378 " The first time do trap is hit and it disables tracing, the counter\n"
3379 " will decrement to 2. If tracing is already disabled, the counter\n"
3380 " will not decrement. It only decrements when the trigger did work\n"
3381 " To remove trigger without count:\n"
3382 " echo '!<function>:<trigger> > set_ftrace_filter\n"
3383 " To remove trigger with a count:\n"
3384 " echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3385 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3386 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3387 " modules: Can select a group via module command :mod:\n"
3388 " Does not accept triggers\n"
3389#endif /* CONFIG_DYNAMIC_FTRACE */
3390#ifdef CONFIG_FUNCTION_TRACER
3391 " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3392#endif
3393#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3394 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3395 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3396#endif
3397#ifdef CONFIG_TRACER_SNAPSHOT
3398 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3399 "\t\t\t Read the contents for more information\n"
3400#endif
3401#ifdef CONFIG_STACKTRACE
3402 " stack_trace\t\t- Shows the max stack trace when active\n"
3403 " stack_max_size\t- Shows current max stack size that was traced\n"
3404 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
3405#ifdef CONFIG_DYNAMIC_FTRACE
3406 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3407#endif
3408#endif /* CONFIG_STACKTRACE */
7bd2f24c
IM
3409;
3410
3411static ssize_t
3412tracing_readme_read(struct file *filp, char __user *ubuf,
3413 size_t cnt, loff_t *ppos)
3414{
3415 return simple_read_from_buffer(ubuf, cnt, ppos,
3416 readme_msg, strlen(readme_msg));
3417}
3418
5e2336a0 3419static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3420 .open = tracing_open_generic,
3421 .read = tracing_readme_read,
b444786f 3422 .llseek = generic_file_llseek,
7bd2f24c
IM
3423};
3424
69abe6a5
AP
3425static ssize_t
3426tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3427 size_t cnt, loff_t *ppos)
3428{
3429 char *buf_comm;
3430 char *file_buf;
3431 char *buf;
3432 int len = 0;
3433 int pid;
3434 int i;
3435
3436 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3437 if (!file_buf)
3438 return -ENOMEM;
3439
3440 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3441 if (!buf_comm) {
3442 kfree(file_buf);
3443 return -ENOMEM;
3444 }
3445
3446 buf = file_buf;
3447
3448 for (i = 0; i < SAVED_CMDLINES; i++) {
3449 int r;
3450
3451 pid = map_cmdline_to_pid[i];
3452 if (pid == -1 || pid == NO_CMDLINE_MAP)
3453 continue;
3454
3455 trace_find_cmdline(pid, buf_comm);
3456 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3457 buf += r;
3458 len += r;
3459 }
3460
3461 len = simple_read_from_buffer(ubuf, cnt, ppos,
3462 file_buf, len);
3463
3464 kfree(file_buf);
3465 kfree(buf_comm);
3466
3467 return len;
3468}
3469
3470static const struct file_operations tracing_saved_cmdlines_fops = {
3471 .open = tracing_open_generic,
3472 .read = tracing_saved_cmdlines_read,
b444786f 3473 .llseek = generic_file_llseek,
69abe6a5
AP
3474};
3475
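/*
 * Example output (pids and comms illustrative), one "<pid> <comm>"
 * pair per saved entry, matching the sprintf format above:
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	27 kworker/0:1
 */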
bc0c38d1
SR
3476static ssize_t
3477tracing_set_trace_read(struct file *filp, char __user *ubuf,
3478 size_t cnt, loff_t *ppos)
3479{
2b6080f2 3480 struct trace_array *tr = filp->private_data;
ee6c2c1b 3481 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
3482 int r;
3483
3484 mutex_lock(&trace_types_lock);
2b6080f2 3485 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
3486 mutex_unlock(&trace_types_lock);
3487
4bf39a94 3488 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
3489}
3490
b6f11df2
ACM
3491int tracer_init(struct tracer *t, struct trace_array *tr)
3492{
12883efb 3493 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
3494 return t->init(tr);
3495}
3496
12883efb 3497static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
3498{
3499 int cpu;
737223fb 3500
438ced17 3501 for_each_tracing_cpu(cpu)
12883efb 3502 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
3503}
3504
12883efb 3505#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 3506/* resize @trace_buf's buffer to the size of @size_buf's entries */
12883efb
SRRH
3507static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3508 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
3509{
3510 int cpu, ret = 0;
3511
3512 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3513 for_each_tracing_cpu(cpu) {
12883efb
SRRH
3514 ret = ring_buffer_resize(trace_buf->buffer,
3515 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
3516 if (ret < 0)
3517 break;
12883efb
SRRH
3518 per_cpu_ptr(trace_buf->data, cpu)->entries =
3519 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
3520 }
3521 } else {
12883efb
SRRH
3522 ret = ring_buffer_resize(trace_buf->buffer,
3523 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 3524 if (ret == 0)
12883efb
SRRH
3525 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3526 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
3527 }
3528
3529 return ret;
3530}
12883efb 3531#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 3532
2b6080f2
SR
3533static int __tracing_resize_ring_buffer(struct trace_array *tr,
3534 unsigned long size, int cpu)
73c5162a
SR
3535{
3536 int ret;
3537
3538 /*
3539	 * If the kernel or the user changes the size of the ring buffer,
a123c52b
SR
3540 * we use the size that was given, and we can forget about
3541 * expanding it later.
73c5162a 3542 */
55034cd6 3543 ring_buffer_expanded = true;
73c5162a 3544
b382ede6 3545 /* May be called before buffers are initialized */
12883efb 3546 if (!tr->trace_buffer.buffer)
b382ede6
SR
3547 return 0;
3548
12883efb 3549 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
3550 if (ret < 0)
3551 return ret;
3552
12883efb 3553#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3554 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3555 !tr->current_trace->use_max_tr)
ef710e10
KM
3556 goto out;
3557
12883efb 3558 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 3559 if (ret < 0) {
12883efb
SRRH
3560 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3561 &tr->trace_buffer, cpu);
73c5162a 3562 if (r < 0) {
a123c52b
SR
3563 /*
3564 * AARGH! We are left with different
3565 * size max buffer!!!!
3566 * The max buffer is our "snapshot" buffer.
3567 * When a tracer needs a snapshot (one of the
3568 * latency tracers), it swaps the max buffer
3569	 * with the saved snapshot. We succeeded in
3570	 * updating the size of the main buffer, but failed to
3571 * update the size of the max buffer. But when we tried
3572 * to reset the main buffer to the original size, we
3573 * failed there too. This is very unlikely to
3574 * happen, but if it does, warn and kill all
3575 * tracing.
3576 */
73c5162a
SR
3577 WARN_ON(1);
3578 tracing_disabled = 1;
3579 }
3580 return ret;
3581 }
3582
438ced17 3583 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3584 set_buffer_entries(&tr->max_buffer, size);
438ced17 3585 else
12883efb 3586 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 3587
ef710e10 3588 out:
12883efb
SRRH
3589#endif /* CONFIG_TRACER_MAX_TRACE */
3590
438ced17 3591 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3592 set_buffer_entries(&tr->trace_buffer, size);
438ced17 3593 else
12883efb 3594 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
3595
3596 return ret;
3597}
3598
2b6080f2
SR
3599static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3600 unsigned long size, int cpu_id)
4f271a2a 3601{
83f40318 3602 int ret = size;
4f271a2a
VN
3603
3604 mutex_lock(&trace_types_lock);
3605
438ced17
VN
3606 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3607	 /* make sure this cpu is enabled in the mask */
3608 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3609 ret = -EINVAL;
3610 goto out;
3611 }
3612 }
4f271a2a 3613
2b6080f2 3614 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
3615 if (ret < 0)
3616 ret = -ENOMEM;
3617
438ced17 3618out:
4f271a2a
VN
3619 mutex_unlock(&trace_types_lock);
3620
3621 return ret;
3622}
3623
ef710e10 3624
1852fcce
SR
3625/**
3626 * tracing_update_buffers - used by tracing facility to expand ring buffers
3627 *
3628 * To save memory when tracing is configured in but never used,
3629 * the ring buffers are set to a minimum size. Once a user
3630 * starts to use the tracing facility, the buffers need to grow
3631 * to their default size.
3632 *
3633 * This function is to be called when a tracer is about to be used.
3634 */
3635int tracing_update_buffers(void)
3636{
3637 int ret = 0;
3638
1027fcb2 3639 mutex_lock(&trace_types_lock);
1852fcce 3640 if (!ring_buffer_expanded)
2b6080f2 3641 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 3642 RING_BUFFER_ALL_CPUS);
1027fcb2 3643 mutex_unlock(&trace_types_lock);
1852fcce
SR
3644
3645 return ret;
3646}
3647
577b785f
SR
3648struct trace_option_dentry;
3649
3650static struct trace_option_dentry *
2b6080f2 3651create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
3652
3653static void
3654destroy_trace_option_files(struct trace_option_dentry *topts);
3655
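/*
 * Switching tracers: disable branch tracing, reset and park the old
 * tracer on nop_trace while buffers are reconfigured (the snapshot
 * buffer is freed or allocated as the new tracer requires), rebuild
 * the per-tracer option files, then init and enable the new tracer.
 */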
b2821ae6 3656static int tracing_set_tracer(const char *buf)
bc0c38d1 3657{
577b785f 3658 static struct trace_option_dentry *topts;
bc0c38d1
SR
3659 struct trace_array *tr = &global_trace;
3660 struct tracer *t;
12883efb 3661#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 3662 bool had_max_tr;
12883efb 3663#endif
d9e54076 3664 int ret = 0;
bc0c38d1 3665
1027fcb2
SR
3666 mutex_lock(&trace_types_lock);
3667
73c5162a 3668 if (!ring_buffer_expanded) {
2b6080f2 3669 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 3670 RING_BUFFER_ALL_CPUS);
73c5162a 3671 if (ret < 0)
59f586db 3672 goto out;
73c5162a
SR
3673 ret = 0;
3674 }
3675
bc0c38d1
SR
3676 for (t = trace_types; t; t = t->next) {
3677 if (strcmp(t->name, buf) == 0)
3678 break;
3679 }
c2931e05
FW
3680 if (!t) {
3681 ret = -EINVAL;
3682 goto out;
3683 }
2b6080f2 3684 if (t == tr->current_trace)
bc0c38d1
SR
3685 goto out;
3686
9f029e83 3687 trace_branch_disable();
613f04a0 3688
2b6080f2 3689 tr->current_trace->enabled = false;
613f04a0 3690
2b6080f2
SR
3691 if (tr->current_trace->reset)
3692 tr->current_trace->reset(tr);
34600f0e 3693
12883efb 3694 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 3695 tr->current_trace = &nop_trace;
34600f0e 3696
45ad21ca
SRRH
3697#ifdef CONFIG_TRACER_MAX_TRACE
3698 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
3699
3700 if (had_max_tr && !t->use_max_tr) {
3701 /*
3702 * We need to make sure that the update_max_tr sees that
3703 * current_trace changed to nop_trace to keep it from
3704 * swapping the buffers after we resize it.
3705	 * update_max_tr() is called with interrupts disabled,
3706	 * so a synchronize_sched() is sufficient.
3707 */
3708 synchronize_sched();
3209cff4 3709 free_snapshot(tr);
ef710e10 3710 }
12883efb 3711#endif
577b785f
SR
3712 destroy_trace_option_files(topts);
3713
2b6080f2 3714 topts = create_trace_option_files(tr, t);
12883efb
SRRH
3715
3716#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 3717 if (t->use_max_tr && !had_max_tr) {
3209cff4 3718 ret = alloc_snapshot(tr);
d60da506
HT
3719 if (ret < 0)
3720 goto out;
ef710e10 3721 }
12883efb 3722#endif
577b785f 3723
1c80025a 3724 if (t->init) {
b6f11df2 3725 ret = tracer_init(t, tr);
1c80025a
FW
3726 if (ret)
3727 goto out;
3728 }
bc0c38d1 3729
2b6080f2
SR
3730 tr->current_trace = t;
3731 tr->current_trace->enabled = true;
9f029e83 3732 trace_branch_enable(tr);
bc0c38d1
SR
3733 out:
3734 mutex_unlock(&trace_types_lock);
3735
d9e54076
PZ
3736 return ret;
3737}
3738
3739static ssize_t
3740tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3741 size_t cnt, loff_t *ppos)
3742{
ee6c2c1b 3743 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
3744 int i;
3745 size_t ret;
e6e7a65a
FW
3746 int err;
3747
3748 ret = cnt;
d9e54076 3749
ee6c2c1b
LZ
3750 if (cnt > MAX_TRACER_SIZE)
3751 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
3752
3753 if (copy_from_user(&buf, ubuf, cnt))
3754 return -EFAULT;
3755
3756 buf[cnt] = 0;
3757
3758 /* strip ending whitespace. */
3759 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3760 buf[i] = 0;
3761
e6e7a65a
FW
3762 err = tracing_set_tracer(buf);
3763 if (err)
3764 return err;
d9e54076 3765
cf8517cf 3766 *ppos += ret;
bc0c38d1 3767
c2931e05 3768 return ret;
bc0c38d1
SR
3769}
3770
3771static ssize_t
3772tracing_max_lat_read(struct file *filp, char __user *ubuf,
3773 size_t cnt, loff_t *ppos)
3774{
3775 unsigned long *ptr = filp->private_data;
3776 char buf[64];
3777 int r;
3778
cffae437 3779 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 3780 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
3781 if (r > sizeof(buf))
3782 r = sizeof(buf);
4bf39a94 3783 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
3784}
3785
3786static ssize_t
3787tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3788 size_t cnt, loff_t *ppos)
3789{
5e39841c 3790 unsigned long *ptr = filp->private_data;
5e39841c 3791 unsigned long val;
c6caeeb1 3792 int ret;
bc0c38d1 3793
22fe9b54
PH
3794 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3795 if (ret)
c6caeeb1 3796 return ret;
bc0c38d1
SR
3797
3798 *ptr = val * 1000;
3799
3800 return cnt;
3801}
3802
b3806b43
SR
3803static int tracing_open_pipe(struct inode *inode, struct file *filp)
3804{
2b6080f2
SR
3805 struct trace_cpu *tc = inode->i_private;
3806 struct trace_array *tr = tc->tr;
b3806b43 3807 struct trace_iterator *iter;
b04cc6b1 3808 int ret = 0;
b3806b43
SR
3809
3810 if (tracing_disabled)
3811 return -ENODEV;
3812
b04cc6b1
FW
3813 mutex_lock(&trace_types_lock);
3814
b3806b43
SR
3815 /* create a buffer to store the information to pass to userspace */
3816 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
3817 if (!iter) {
3818 ret = -ENOMEM;
3819 goto out;
3820 }
b3806b43 3821
d7350c3f
FW
3822 /*
3823 * We make a copy of the current tracer to avoid concurrent
3824 * changes on it while we are reading.
3825 */
3826 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3827 if (!iter->trace) {
3828 ret = -ENOMEM;
3829 goto fail;
3830 }
2b6080f2 3831 *iter->trace = *tr->current_trace;
d7350c3f 3832
4462344e 3833 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 3834 ret = -ENOMEM;
d7350c3f 3835 goto fail;
4462344e
RR
3836 }
3837
a309720c 3838 /* trace pipe does not show start of buffer */
4462344e 3839 cpumask_setall(iter->started);
a309720c 3840
112f38a7
SR
3841 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3842 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3843
8be0709f
DS
3844 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3845 if (trace_clocks[trace_clock_id].in_ns)
3846 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3847
2b6080f2
SR
3848 iter->cpu_file = tc->cpu;
3849 iter->tr = tc->tr;
12883efb 3850 iter->trace_buffer = &tc->tr->trace_buffer;
d7350c3f 3851 mutex_init(&iter->mutex);
b3806b43
SR
3852 filp->private_data = iter;
3853
107bad8b
SR
3854 if (iter->trace->pipe_open)
3855 iter->trace->pipe_open(iter);
107bad8b 3856
b444786f 3857 nonseekable_open(inode, filp);
b04cc6b1
FW
3858out:
3859 mutex_unlock(&trace_types_lock);
3860 return ret;
d7350c3f
FW
3861
3862fail:
3863 kfree(iter->trace);
3864 kfree(iter);
3865 mutex_unlock(&trace_types_lock);
3866 return ret;
b3806b43
SR
3867}
3868
3869static int tracing_release_pipe(struct inode *inode, struct file *file)
3870{
3871 struct trace_iterator *iter = file->private_data;
3872
b04cc6b1
FW
3873 mutex_lock(&trace_types_lock);
3874
29bf4a5e 3875 if (iter->trace->pipe_close)
c521efd1
SR
3876 iter->trace->pipe_close(iter);
3877
b04cc6b1
FW
3878 mutex_unlock(&trace_types_lock);
3879
4462344e 3880 free_cpumask_var(iter->started);
d7350c3f
FW
3881 mutex_destroy(&iter->mutex);
3882 kfree(iter->trace);
b3806b43 3883 kfree(iter);
b3806b43
SR
3884
3885 return 0;
3886}
3887
2a2cc8f7 3888static unsigned int
cc60cdc9 3889trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 3890{
15693458
SRRH
3891 /* Iterators are static, they should be filled or empty */
3892 if (trace_buffer_iter(iter, iter->cpu_file))
3893 return POLLIN | POLLRDNORM;
2a2cc8f7 3894
15693458 3895 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
3896 /*
3897 * Always select as readable when in blocking mode
3898 */
3899 return POLLIN | POLLRDNORM;
15693458 3900 else
12883efb 3901 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 3902 filp, poll_table);
2a2cc8f7 3903}
2a2cc8f7 3904
cc60cdc9
SR
3905static unsigned int
3906tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3907{
3908 struct trace_iterator *iter = filp->private_data;
3909
3910 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
3911}
3912
6eaaa5d5
FW
3913/*
3914 * This is a make-shift waitqueue.
3915 * A tracer might use this callback in some rare cases:
3916 *
3917 * 1) the current tracer might hold the runqueue lock when it wakes up
3918 * a reader, hence a deadlock (sched, function, and function graph tracers)
3919 * 2) the function tracers trace all functions, and we don't want
3920 * the overhead of calling wake_up and friends
3921 * (or of tracing them too)
3922 *
3923 * Anyway, this is really a very primitive wakeup.
3924 */
3925void poll_wait_pipe(struct trace_iterator *iter)
3926{
3927 set_current_state(TASK_INTERRUPTIBLE);
3928 /* sleep for 100 msecs, and try again. */
3929 schedule_timeout(HZ / 10);
3930}
3931
ff98781b
EGM
3932/* Must be called with trace_types_lock mutex held. */
3933static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
3934{
3935 struct trace_iterator *iter = filp->private_data;
b3806b43 3936
b3806b43 3937 while (trace_empty(iter)) {
2dc8f095 3938
107bad8b 3939 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 3940 return -EAGAIN;
107bad8b 3941 }
2dc8f095 3942
d7350c3f 3943 mutex_unlock(&iter->mutex);
107bad8b 3944
6eaaa5d5 3945 iter->trace->wait_pipe(iter);
b3806b43 3946
d7350c3f 3947 mutex_lock(&iter->mutex);
107bad8b 3948
6eaaa5d5 3949 if (signal_pending(current))
ff98781b 3950 return -EINTR;
b3806b43
SR
3951
3952 /*
250bfd3d 3953	 * We block until we have read something and tracing is disabled.
b3806b43
SR
3954 * We still block if tracing is disabled, but we have never
3955 * read anything. This allows a user to cat this file, and
3956 * then enable tracing. But after we have read something,
3957 * we give an EOF when tracing is again disabled.
3958 *
3959 * iter->pos will be 0 if we haven't read anything.
3960 */
250bfd3d 3961 if (!tracing_is_enabled() && iter->pos)
b3806b43 3962 break;
b3806b43
SR
3963 }
3964
ff98781b
EGM
3965 return 1;
3966}
3967
3968/*
3969 * Consumer reader.
3970 */
3971static ssize_t
3972tracing_read_pipe(struct file *filp, char __user *ubuf,
3973 size_t cnt, loff_t *ppos)
3974{
3975 struct trace_iterator *iter = filp->private_data;
2b6080f2 3976 struct trace_array *tr = iter->tr;
ff98781b
EGM
3977 ssize_t sret;
3978
3979 /* return any leftover data */
3980 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3981 if (sret != -EBUSY)
3982 return sret;
3983
f9520750 3984 trace_seq_init(&iter->seq);
ff98781b 3985
d7350c3f 3986 /* copy the tracer to avoid using a global lock all around */
ff98781b 3987 mutex_lock(&trace_types_lock);
2b6080f2
SR
3988 if (unlikely(iter->trace->name != tr->current_trace->name))
3989 *iter->trace = *tr->current_trace;
d7350c3f
FW
3990 mutex_unlock(&trace_types_lock);
3991
3992 /*
3993 * Avoid more than one consumer on a single file descriptor
3994 * This is just a matter of traces coherency, the ring buffer itself
3995 * is protected.
3996 */
3997 mutex_lock(&iter->mutex);
ff98781b
EGM
3998 if (iter->trace->read) {
3999 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4000 if (sret)
4001 goto out;
4002 }
4003
4004waitagain:
4005 sret = tracing_wait_pipe(filp);
4006 if (sret <= 0)
4007 goto out;
4008
b3806b43 4009 /* stop when tracing is finished */
ff98781b
EGM
4010 if (trace_empty(iter)) {
4011 sret = 0;
107bad8b 4012 goto out;
ff98781b 4013 }
b3806b43
SR
4014
4015 if (cnt >= PAGE_SIZE)
4016 cnt = PAGE_SIZE - 1;
4017
53d0aa77 4018 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4019 memset(&iter->seq, 0,
4020 sizeof(struct trace_iterator) -
4021 offsetof(struct trace_iterator, seq));
4823ed7e 4022 iter->pos = -1;
b3806b43 4023
4f535968 4024 trace_event_read_lock();
7e53bd42 4025 trace_access_lock(iter->cpu_file);
955b61e5 4026 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4027 enum print_line_t ret;
088b1e42
SR
4028 int len = iter->seq.len;
4029
f9896bf3 4030 ret = print_trace_line(iter);
2c4f035f 4031 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42
SR
4032 /* don't print partial lines */
4033 iter->seq.len = len;
b3806b43 4034 break;
088b1e42 4035 }
b91facc3
FW
4036 if (ret != TRACE_TYPE_NO_CONSUME)
4037 trace_consume(iter);
b3806b43
SR
4038
4039 if (iter->seq.len >= cnt)
4040 break;
ee5e51f5
JO
4041
4042 /*
4043 * Setting the full flag means we reached the trace_seq buffer
4044 * size and we should leave by partial output condition above.
4045 * One of the trace_seq_* functions is not used properly.
4046 */
4047 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4048 iter->ent->type);
b3806b43 4049 }
7e53bd42 4050 trace_access_unlock(iter->cpu_file);
4f535968 4051 trace_event_read_unlock();
b3806b43 4052
b3806b43 4053 /* Now copy what we have to the user */
6c6c2796
PP
4054 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4055 if (iter->seq.readpos >= iter->seq.len)
f9520750 4056 trace_seq_init(&iter->seq);
9ff4b974
PP
4057
4058 /*
25985edc 4059	 * If there was nothing to send to the user, in spite of consuming trace
9ff4b974
PP
4060 * entries, go back to wait for more entries.
4061 */
6c6c2796 4062 if (sret == -EBUSY)
9ff4b974 4063 goto waitagain;
b3806b43 4064
107bad8b 4065out:
d7350c3f 4066 mutex_unlock(&iter->mutex);
107bad8b 4067
6c6c2796 4068 return sret;
b3806b43
SR
4069}
4070
3c56819b
EGM
4071static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
4072 struct pipe_buffer *buf)
4073{
4074 __free_page(buf->page);
4075}
4076
4077static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4078 unsigned int idx)
4079{
4080 __free_page(spd->pages[idx]);
4081}
4082
28dfef8f 4083static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998
SR
4084 .can_merge = 0,
4085 .map = generic_pipe_buf_map,
4086 .unmap = generic_pipe_buf_unmap,
4087 .confirm = generic_pipe_buf_confirm,
4088 .release = tracing_pipe_buf_release,
4089 .steal = generic_pipe_buf_steal,
4090 .get = generic_pipe_buf_get,
3c56819b
EGM
4091};
4092
34cd4998 4093static size_t
fa7c7f6e 4094tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4095{
4096 size_t count;
4097 int ret;
4098
4099 /* Seq buffer is page-sized, exactly what we need. */
4100 for (;;) {
4101 count = iter->seq.len;
4102 ret = print_trace_line(iter);
4103 count = iter->seq.len - count;
4104 if (rem < count) {
4105 rem = 0;
4106 iter->seq.len -= count;
4107 break;
4108 }
4109 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4110 iter->seq.len -= count;
4111 break;
4112 }
4113
74e7ff8c
LJ
4114 if (ret != TRACE_TYPE_NO_CONSUME)
4115 trace_consume(iter);
34cd4998 4116 rem -= count;
955b61e5 4117 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4118 rem = 0;
4119 iter->ent = NULL;
4120 break;
4121 }
4122 }
4123
4124 return rem;
4125}
4126
3c56819b
EGM
4127static ssize_t tracing_splice_read_pipe(struct file *filp,
4128 loff_t *ppos,
4129 struct pipe_inode_info *pipe,
4130 size_t len,
4131 unsigned int flags)
4132{
35f3d14d
JA
4133 struct page *pages_def[PIPE_DEF_BUFFERS];
4134 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4135 struct trace_iterator *iter = filp->private_data;
4136 struct splice_pipe_desc spd = {
35f3d14d
JA
4137 .pages = pages_def,
4138 .partial = partial_def,
34cd4998 4139 .nr_pages = 0, /* This gets updated below. */
047fe360 4140 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4141 .flags = flags,
4142 .ops = &tracing_pipe_buf_ops,
4143 .spd_release = tracing_spd_release_pipe,
3c56819b 4144 };
2b6080f2 4145 struct trace_array *tr = iter->tr;
3c56819b 4146 ssize_t ret;
34cd4998 4147 size_t rem;
3c56819b
EGM
4148 unsigned int i;
4149
35f3d14d
JA
4150 if (splice_grow_spd(pipe, &spd))
4151 return -ENOMEM;
4152
d7350c3f 4153 /* copy the tracer to avoid using a global lock all around */
3c56819b 4154 mutex_lock(&trace_types_lock);
2b6080f2
SR
4155 if (unlikely(iter->trace->name != tr->current_trace->name))
4156 *iter->trace = *tr->current_trace;
d7350c3f
FW
4157 mutex_unlock(&trace_types_lock);
4158
4159 mutex_lock(&iter->mutex);
3c56819b
EGM
4160
4161 if (iter->trace->splice_read) {
4162 ret = iter->trace->splice_read(iter, filp,
4163 ppos, pipe, len, flags);
4164 if (ret)
34cd4998 4165 goto out_err;
3c56819b
EGM
4166 }
4167
4168 ret = tracing_wait_pipe(filp);
4169 if (ret <= 0)
34cd4998 4170 goto out_err;
3c56819b 4171
955b61e5 4172 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4173 ret = -EFAULT;
34cd4998 4174 goto out_err;
3c56819b
EGM
4175 }
4176
4f535968 4177 trace_event_read_lock();
7e53bd42 4178 trace_access_lock(iter->cpu_file);
4f535968 4179
3c56819b 4180 /* Fill as many pages as possible. */
35f3d14d
JA
4181 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4182 spd.pages[i] = alloc_page(GFP_KERNEL);
4183 if (!spd.pages[i])
34cd4998 4184 break;
3c56819b 4185
fa7c7f6e 4186 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4187
4188 /* Copy the data into the page, so we can start over. */
4189 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4190 page_address(spd.pages[i]),
3c56819b
EGM
4191 iter->seq.len);
4192 if (ret < 0) {
35f3d14d 4193 __free_page(spd.pages[i]);
3c56819b
EGM
4194 break;
4195 }
35f3d14d
JA
4196 spd.partial[i].offset = 0;
4197 spd.partial[i].len = iter->seq.len;
3c56819b 4198
f9520750 4199 trace_seq_init(&iter->seq);
3c56819b
EGM
4200 }
4201
7e53bd42 4202 trace_access_unlock(iter->cpu_file);
4f535968 4203 trace_event_read_unlock();
d7350c3f 4204 mutex_unlock(&iter->mutex);
3c56819b
EGM
4205
4206 spd.nr_pages = i;
4207
35f3d14d
JA
4208 ret = splice_to_pipe(pipe, &spd);
4209out:
047fe360 4210 splice_shrink_spd(&spd);
35f3d14d 4211 return ret;
3c56819b 4212
34cd4998 4213out_err:
d7350c3f 4214 mutex_unlock(&iter->mutex);
35f3d14d 4215 goto out;
3c56819b
EGM
4216}
4217
a98a3c3f
SR
4218static ssize_t
4219tracing_entries_read(struct file *filp, char __user *ubuf,
4220 size_t cnt, loff_t *ppos)
4221{
2b6080f2
SR
4222 struct trace_cpu *tc = filp->private_data;
4223 struct trace_array *tr = tc->tr;
438ced17
VN
4224 char buf[64];
4225 int r = 0;
4226 ssize_t ret;
a98a3c3f 4227
db526ca3 4228 mutex_lock(&trace_types_lock);
438ced17 4229
2b6080f2 4230 if (tc->cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4231 int cpu, buf_size_same;
4232 unsigned long size;
4233
4234 size = 0;
4235 buf_size_same = 1;
4236 /* check if all cpu sizes are same */
4237 for_each_tracing_cpu(cpu) {
4238 /* fill in the size from first enabled cpu */
4239 if (size == 0)
12883efb
SRRH
4240 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4241 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4242 buf_size_same = 0;
4243 break;
4244 }
4245 }
4246
4247 if (buf_size_same) {
4248 if (!ring_buffer_expanded)
4249 r = sprintf(buf, "%lu (expanded: %lu)\n",
4250 size >> 10,
4251 trace_buf_size >> 10);
4252 else
4253 r = sprintf(buf, "%lu\n", size >> 10);
4254 } else
4255 r = sprintf(buf, "X\n");
4256 } else
12883efb 4257 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
438ced17 4258
db526ca3
SR
4259 mutex_unlock(&trace_types_lock);
4260
438ced17
VN
4261 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4262 return ret;
a98a3c3f
SR
4263}
4264
4265static ssize_t
4266tracing_entries_write(struct file *filp, const char __user *ubuf,
4267 size_t cnt, loff_t *ppos)
4268{
2b6080f2 4269 struct trace_cpu *tc = filp->private_data;
a98a3c3f 4270 unsigned long val;
4f271a2a 4271 int ret;
a98a3c3f 4272
22fe9b54
PH
4273 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4274 if (ret)
c6caeeb1 4275 return ret;
a98a3c3f
SR
4276
4277 /* must have at least 1 entry */
4278 if (!val)
4279 return -EINVAL;
4280
1696b2b0
SR
4281 /* value is in KB */
4282 val <<= 10;
4283
2b6080f2 4284 ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
4f271a2a
VN
4285 if (ret < 0)
4286 return ret;
a98a3c3f 4287
cf8517cf 4288 *ppos += cnt;
a98a3c3f 4289
4f271a2a
VN
4290 return cnt;
4291}
bf5e6519 4292
f81ab074
VN
4293static ssize_t
4294tracing_total_entries_read(struct file *filp, char __user *ubuf,
4295 size_t cnt, loff_t *ppos)
4296{
4297 struct trace_array *tr = filp->private_data;
4298 char buf[64];
4299 int r, cpu;
4300 unsigned long size = 0, expanded_size = 0;
4301
4302 mutex_lock(&trace_types_lock);
4303 for_each_tracing_cpu(cpu) {
12883efb 4304 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
4305 if (!ring_buffer_expanded)
4306 expanded_size += trace_buf_size >> 10;
4307 }
4308 if (ring_buffer_expanded)
4309 r = sprintf(buf, "%lu\n", size);
4310 else
4311 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4312 mutex_unlock(&trace_types_lock);
4313
4314 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4315}
4316
4f271a2a
VN
4317static ssize_t
4318tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4319 size_t cnt, loff_t *ppos)
4320{
4321 /*
4322	 * There is no need to read what the user has written; this
4323	 * function just makes sure that there is no error when "echo" is used.
4324 */
4325
4326 *ppos += cnt;
a98a3c3f
SR
4327
4328 return cnt;
4329}
4330
4f271a2a
VN
4331static int
4332tracing_free_buffer_release(struct inode *inode, struct file *filp)
4333{
2b6080f2
SR
4334 struct trace_array *tr = inode->i_private;
4335
cf30cf67
SR
4336	 /* disable tracing? */
4337 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4338 tracing_off();
4f271a2a 4339 /* resize the ring buffer to 0 */
2b6080f2 4340 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a
VN
4341
4342 return 0;
4343}
4344
5bf9a1ee
PP
4345static ssize_t
4346tracing_mark_write(struct file *filp, const char __user *ubuf,
4347 size_t cnt, loff_t *fpos)
4348{
d696b58c
SR
4349 unsigned long addr = (unsigned long)ubuf;
4350 struct ring_buffer_event *event;
4351 struct ring_buffer *buffer;
4352 struct print_entry *entry;
4353 unsigned long irq_flags;
4354 struct page *pages[2];
6edb2a8a 4355 void *map_page[2];
d696b58c
SR
4356 int nr_pages = 1;
4357 ssize_t written;
d696b58c
SR
4358 int offset;
4359 int size;
4360 int len;
4361 int ret;
6edb2a8a 4362 int i;
5bf9a1ee 4363
c76f0694 4364 if (tracing_disabled)
5bf9a1ee
PP
4365 return -EINVAL;
4366
5224c3a3
MSB
4367 if (!(trace_flags & TRACE_ITER_MARKERS))
4368 return -EINVAL;
4369
5bf9a1ee
PP
4370 if (cnt > TRACE_BUF_SIZE)
4371 cnt = TRACE_BUF_SIZE;
4372
d696b58c
SR
4373 /*
4374 * Userspace is injecting traces into the kernel trace buffer.
4375	 * We want to be as non-intrusive as possible.
4376 * To do so, we do not want to allocate any special buffers
4377 * or take any locks, but instead write the userspace data
4378 * straight into the ring buffer.
4379 *
4380	 * First we need to pin the userspace buffer into memory.
4381	 * Most likely it already is, because the user just referenced it,
4382	 * but there's no guarantee that it is. By using get_user_pages_fast()
4383 * and kmap_atomic/kunmap_atomic() we can get access to the
4384 * pages directly. We then write the data directly into the
4385 * ring buffer.
4386 */
4387 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 4388
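	/*
	 * Example: with 4KB pages, a 100-byte write whose start sits 40
	 * bytes before a page boundary spans two pages, so both get
	 * pinned and the copy below is split at the boundary.
	 */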
d696b58c
SR
4389 /* check if we cross pages */
4390 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4391 nr_pages = 2;
4392
4393 offset = addr & (PAGE_SIZE - 1);
4394 addr &= PAGE_MASK;
4395
4396 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4397 if (ret < nr_pages) {
4398 while (--ret >= 0)
4399 put_page(pages[ret]);
4400 written = -EFAULT;
4401 goto out;
5bf9a1ee 4402 }
d696b58c 4403
6edb2a8a
SR
4404 for (i = 0; i < nr_pages; i++)
4405 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
4406
4407 local_save_flags(irq_flags);
4408 size = sizeof(*entry) + cnt + 2; /* possible \n added */
12883efb 4409 buffer = global_trace.trace_buffer.buffer;
d696b58c
SR
4410 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4411 irq_flags, preempt_count());
4412 if (!event) {
4413 /* Ring buffer disabled, return as if not open for write */
4414 written = -EBADF;
4415 goto out_unlock;
5bf9a1ee 4416 }
d696b58c
SR
4417
4418 entry = ring_buffer_event_data(event);
4419 entry->ip = _THIS_IP_;
4420
4421 if (nr_pages == 2) {
4422 len = PAGE_SIZE - offset;
6edb2a8a
SR
4423 memcpy(&entry->buf, map_page[0] + offset, len);
4424 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 4425 } else
6edb2a8a 4426 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 4427
d696b58c
SR
4428 if (entry->buf[cnt - 1] != '\n') {
4429 entry->buf[cnt] = '\n';
4430 entry->buf[cnt + 1] = '\0';
4431 } else
4432 entry->buf[cnt] = '\0';
4433
7ffbd48d 4434 __buffer_unlock_commit(buffer, event);
5bf9a1ee 4435
d696b58c 4436 written = cnt;
5bf9a1ee 4437
d696b58c 4438 *fpos += written;
1aa54bca 4439
d696b58c 4440 out_unlock:
6edb2a8a
SR
4441 for (i = 0; i < nr_pages; i++){
4442 kunmap_atomic(map_page[i]);
4443 put_page(pages[i]);
4444 }
d696b58c 4445 out:
1aa54bca 4446 return written;
5bf9a1ee
PP
4447}
4448
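/*
 * "trace_clock" read: list the available clocks on one line with the
 * active one bracketed, e.g. "[local] global counter".
 */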
13f16d20 4449static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 4450{
2b6080f2 4451 struct trace_array *tr = m->private;
5079f326
Z
4452 int i;
4453
4454 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 4455 seq_printf(m,
5079f326 4456 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
4457 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4458 i == tr->clock_id ? "]" : "");
13f16d20 4459 seq_putc(m, '\n');
5079f326 4460
13f16d20 4461 return 0;
5079f326
Z
4462}
4463
4464static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4465 size_t cnt, loff_t *fpos)
4466{
2b6080f2
SR
4467 struct seq_file *m = filp->private_data;
4468 struct trace_array *tr = m->private;
5079f326
Z
4469 char buf[64];
4470 const char *clockstr;
4471 int i;
4472
4473 if (cnt >= sizeof(buf))
4474 return -EINVAL;
4475
4476 if (copy_from_user(&buf, ubuf, cnt))
4477 return -EFAULT;
4478
4479 buf[cnt] = 0;
4480
4481 clockstr = strstrip(buf);
4482
4483 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4484 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4485 break;
4486 }
4487 if (i == ARRAY_SIZE(trace_clocks))
4488 return -EINVAL;
4489
5079f326
Z
4490 mutex_lock(&trace_types_lock);
4491
2b6080f2
SR
4492 tr->clock_id = i;
4493
12883efb 4494 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 4495
60303ed3
DS
4496 /*
4497 * New clock may not be consistent with the previous clock.
4498 * Reset the buffer so that it doesn't have incomparable timestamps.
4499 */
12883efb
SRRH
4500 tracing_reset_online_cpus(&global_trace.trace_buffer);
4501
4502#ifdef CONFIG_TRACER_MAX_TRACE
4503 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4504 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4505 tracing_reset_online_cpus(&global_trace.max_buffer);
4506#endif
60303ed3 4507
5079f326
Z
4508 mutex_unlock(&trace_types_lock);
4509
4510 *fpos += cnt;
4511
4512 return cnt;
4513}
4514
13f16d20
LZ
4515static int tracing_clock_open(struct inode *inode, struct file *file)
4516{
4517 if (tracing_disabled)
4518 return -ENODEV;
2b6080f2
SR
4519
4520 return single_open(file, tracing_clock_show, inode->i_private);
13f16d20
LZ
4521}
4522
6de58e62
SRRH
4523struct ftrace_buffer_info {
4524 struct trace_iterator iter;
4525 void *spare;
4526 unsigned int read;
4527};
4528
debdd57f
HT
4529#ifdef CONFIG_TRACER_SNAPSHOT
4530static int tracing_snapshot_open(struct inode *inode, struct file *file)
4531{
2b6080f2 4532 struct trace_cpu *tc = inode->i_private;
debdd57f 4533 struct trace_iterator *iter;
2b6080f2 4534 struct seq_file *m;
debdd57f
HT
4535 int ret = 0;
4536
4537 if (file->f_mode & FMODE_READ) {
4538 iter = __tracing_open(inode, file, true);
4539 if (IS_ERR(iter))
4540 ret = PTR_ERR(iter);
2b6080f2
SR
4541 } else {
4542 /* Writes still need the seq_file to hold the private data */
4543 m = kzalloc(sizeof(*m), GFP_KERNEL);
4544 if (!m)
4545 return -ENOMEM;
4546 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4547 if (!iter) {
4548 kfree(m);
4549 return -ENOMEM;
4550 }
4551 iter->tr = tc->tr;
12883efb 4552 iter->trace_buffer = &tc->tr->max_buffer;
f1affcaa 4553 iter->cpu_file = tc->cpu;
2b6080f2
SR
4554 m->private = iter;
4555 file->private_data = m;
debdd57f 4556 }
2b6080f2 4557
debdd57f
HT
4558 return ret;
4559}
4560
4561static ssize_t
4562tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4563 loff_t *ppos)
4564{
2b6080f2
SR
4565 struct seq_file *m = filp->private_data;
4566 struct trace_iterator *iter = m->private;
4567 struct trace_array *tr = iter->tr;
debdd57f
HT
4568 unsigned long val;
4569 int ret;
4570
4571 ret = tracing_update_buffers();
4572 if (ret < 0)
4573 return ret;
4574
4575 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4576 if (ret)
4577 return ret;
4578
4579 mutex_lock(&trace_types_lock);
4580
2b6080f2 4581 if (tr->current_trace->use_max_tr) {
debdd57f
HT
4582 ret = -EBUSY;
4583 goto out;
4584 }
4585
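	/*
	 * Semantics of the value written: 0 frees the snapshot buffer,
	 * 1 allocates it (if needed) and takes a snapshot by swapping
	 * buffers, and any other value just clears the snapshot contents.
	 */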
4586 switch (val) {
4587 case 0:
f1affcaa
SRRH
4588 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4589 ret = -EINVAL;
4590 break;
debdd57f 4591 }
3209cff4
SRRH
4592 if (tr->allocated_snapshot)
4593 free_snapshot(tr);
debdd57f
HT
4594 break;
4595 case 1:
f1affcaa
SRRH
4596/* Only allow per-cpu swap if the ring buffer supports it */
4597#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4598 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4599 ret = -EINVAL;
4600 break;
4601 }
4602#endif
45ad21ca 4603 if (!tr->allocated_snapshot) {
3209cff4 4604 ret = alloc_snapshot(tr);
debdd57f
HT
4605 if (ret < 0)
4606 break;
debdd57f 4607 }
debdd57f
HT
4608 local_irq_disable();
4609 /* Now, we're going to swap */
f1affcaa 4610 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 4611 update_max_tr(tr, current, smp_processor_id());
f1affcaa 4612 else
ce9bae55 4613 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
4614 local_irq_enable();
4615 break;
4616 default:
45ad21ca 4617 if (tr->allocated_snapshot) {
f1affcaa
SRRH
4618 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4619 tracing_reset_online_cpus(&tr->max_buffer);
4620 else
4621 tracing_reset(&tr->max_buffer, iter->cpu_file);
4622 }
debdd57f
HT
4623 break;
4624 }
4625
4626 if (ret >= 0) {
4627 *ppos += cnt;
4628 ret = cnt;
4629 }
4630out:
4631 mutex_unlock(&trace_types_lock);
4632 return ret;
4633}
2b6080f2
SR
4634
4635static int tracing_snapshot_release(struct inode *inode, struct file *file)
4636{
4637 struct seq_file *m = file->private_data;
4638
4639 if (file->f_mode & FMODE_READ)
4640 return tracing_release(inode, file);
4641
4642 /* If write only, the seq_file is just a stub */
4643 if (m)
4644 kfree(m->private);
4645 kfree(m);
4646
4647 return 0;
4648}
4649
6de58e62
SRRH
4650static int tracing_buffers_open(struct inode *inode, struct file *filp);
4651static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4652 size_t count, loff_t *ppos);
4653static int tracing_buffers_release(struct inode *inode, struct file *file);
4654static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4655 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4656
4657static int snapshot_raw_open(struct inode *inode, struct file *filp)
4658{
4659 struct ftrace_buffer_info *info;
4660 int ret;
4661
4662 ret = tracing_buffers_open(inode, filp);
4663 if (ret < 0)
4664 return ret;
4665
4666 info = filp->private_data;
4667
4668 if (info->iter.trace->use_max_tr) {
4669 tracing_buffers_release(inode, filp);
4670 return -EBUSY;
4671 }
4672
4673 info->iter.snapshot = true;
4674 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4675
4676 return ret;
4677}
4678
debdd57f
HT
4679#endif /* CONFIG_TRACER_SNAPSHOT */
4680
4681
5e2336a0 4682static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
4683 .open = tracing_open_generic,
4684 .read = tracing_max_lat_read,
4685 .write = tracing_max_lat_write,
b444786f 4686 .llseek = generic_file_llseek,
bc0c38d1
SR
4687};
4688
5e2336a0 4689static const struct file_operations set_tracer_fops = {
4bf39a94
IM
4690 .open = tracing_open_generic,
4691 .read = tracing_set_trace_read,
4692 .write = tracing_set_trace_write,
b444786f 4693 .llseek = generic_file_llseek,
bc0c38d1
SR
4694};
4695
5e2336a0 4696static const struct file_operations tracing_pipe_fops = {
4bf39a94 4697 .open = tracing_open_pipe,
2a2cc8f7 4698 .poll = tracing_poll_pipe,
4bf39a94 4699 .read = tracing_read_pipe,
3c56819b 4700 .splice_read = tracing_splice_read_pipe,
4bf39a94 4701 .release = tracing_release_pipe,
b444786f 4702 .llseek = no_llseek,
b3806b43
SR
4703};
4704
5e2336a0 4705static const struct file_operations tracing_entries_fops = {
2b6080f2 4706 .open = tracing_open_generic,
a98a3c3f
SR
4707 .read = tracing_entries_read,
4708 .write = tracing_entries_write,
b444786f 4709 .llseek = generic_file_llseek,
a98a3c3f
SR
4710};
4711
f81ab074
VN
4712static const struct file_operations tracing_total_entries_fops = {
4713 .open = tracing_open_generic,
4714 .read = tracing_total_entries_read,
4715 .llseek = generic_file_llseek,
4716};
4717
4f271a2a
VN
4718static const struct file_operations tracing_free_buffer_fops = {
4719 .write = tracing_free_buffer_write,
4720 .release = tracing_free_buffer_release,
4721};
4722
5e2336a0 4723static const struct file_operations tracing_mark_fops = {
43a15386 4724 .open = tracing_open_generic,
5bf9a1ee 4725 .write = tracing_mark_write,
b444786f 4726 .llseek = generic_file_llseek,
5bf9a1ee
PP
4727};
4728
5079f326 4729static const struct file_operations trace_clock_fops = {
13f16d20
LZ
4730 .open = tracing_clock_open,
4731 .read = seq_read,
4732 .llseek = seq_lseek,
4733 .release = single_release,
5079f326
Z
4734 .write = tracing_clock_write,
4735};
4736
debdd57f
HT
4737#ifdef CONFIG_TRACER_SNAPSHOT
4738static const struct file_operations snapshot_fops = {
4739 .open = tracing_snapshot_open,
4740 .read = seq_read,
4741 .write = tracing_snapshot_write,
4742 .llseek = tracing_seek,
2b6080f2 4743 .release = tracing_snapshot_release,
debdd57f 4744};
debdd57f 4745
6de58e62
SRRH
4746static const struct file_operations snapshot_raw_fops = {
4747 .open = snapshot_raw_open,
4748 .read = tracing_buffers_read,
4749 .release = tracing_buffers_release,
4750 .splice_read = tracing_buffers_splice_read,
4751 .llseek = no_llseek,
2cadf913
SR
4752};
4753
6de58e62
SRRH
4754#endif /* CONFIG_TRACER_SNAPSHOT */
4755
2cadf913
SR
4756static int tracing_buffers_open(struct inode *inode, struct file *filp)
4757{
2b6080f2
SR
4758 struct trace_cpu *tc = inode->i_private;
4759 struct trace_array *tr = tc->tr;
2cadf913
SR
4760 struct ftrace_buffer_info *info;
4761
4762 if (tracing_disabled)
4763 return -ENODEV;
4764
4765 info = kzalloc(sizeof(*info), GFP_KERNEL);
4766 if (!info)
4767 return -ENOMEM;
4768
a695cb58
SRRH
4769 mutex_lock(&trace_types_lock);
4770
4771 tr->ref++;
4772
cc60cdc9
SR
4773 info->iter.tr = tr;
4774 info->iter.cpu_file = tc->cpu;
b627344f 4775 info->iter.trace = tr->current_trace;
12883efb 4776 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 4777 info->spare = NULL;
2cadf913 4778 /* Force reading ring buffer for first read */
cc60cdc9 4779 info->read = (unsigned int)-1;
2cadf913
SR
4780
4781 filp->private_data = info;
4782
a695cb58
SRRH
4783 mutex_unlock(&trace_types_lock);
4784
d1e7e02f 4785 return nonseekable_open(inode, filp);
2cadf913
SR
4786}
4787
cc60cdc9
SR
4788static unsigned int
4789tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4790{
4791 struct ftrace_buffer_info *info = filp->private_data;
4792 struct trace_iterator *iter = &info->iter;
4793
4794 return trace_poll(iter, filp, poll_table);
4795}
4796
2cadf913
SR
4797static ssize_t
4798tracing_buffers_read(struct file *filp, char __user *ubuf,
4799 size_t count, loff_t *ppos)
4800{
4801 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 4802 struct trace_iterator *iter = &info->iter;
2cadf913 4803 ssize_t ret;
6de58e62 4804 ssize_t size;
2cadf913 4805
2dc5d12b
SR
4806 if (!count)
4807 return 0;
4808
6de58e62
SRRH
4809 mutex_lock(&trace_types_lock);
4810
4811#ifdef CONFIG_TRACER_MAX_TRACE
4812 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4813 size = -EBUSY;
4814 goto out_unlock;
4815 }
4816#endif
4817
ddd538f3 4818 if (!info->spare)
12883efb
SRRH
4819 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
4820 iter->cpu_file);
6de58e62 4821 size = -ENOMEM;
ddd538f3 4822 if (!info->spare)
6de58e62 4823 goto out_unlock;
ddd538f3 4824
2cadf913
SR
4825 /* Do we have previous read data to read? */
4826 if (info->read < PAGE_SIZE)
4827 goto read;
4828
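	/*
	 * Pull a full page from the ring buffer into the spare page. If
	 * the buffer is empty, drop trace_types_lock and sleep in
	 * wait_pipe() (unless O_NONBLOCK), then retry.
	 */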
b627344f 4829 again:
cc60cdc9 4830 trace_access_lock(iter->cpu_file);
12883efb 4831 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
4832 &info->spare,
4833 count,
cc60cdc9
SR
4834 iter->cpu_file, 0);
4835 trace_access_unlock(iter->cpu_file);
2cadf913 4836
b627344f
SR
4837 if (ret < 0) {
4838 if (trace_empty(iter)) {
6de58e62
SRRH
4839 if ((filp->f_flags & O_NONBLOCK)) {
4840 size = -EAGAIN;
4841 goto out_unlock;
4842 }
4843 mutex_unlock(&trace_types_lock);
b627344f 4844 iter->trace->wait_pipe(iter);
6de58e62
SRRH
4845 mutex_lock(&trace_types_lock);
4846 if (signal_pending(current)) {
4847 size = -EINTR;
4848 goto out_unlock;
4849 }
b627344f
SR
4850 goto again;
4851 }
6de58e62
SRRH
4852 size = 0;
4853 goto out_unlock;
b627344f 4854 }
436fc280 4855
436fc280 4856 info->read = 0;
b627344f 4857 read:
2cadf913
SR
4858 size = PAGE_SIZE - info->read;
4859 if (size > count)
4860 size = count;
4861
4862 ret = copy_to_user(ubuf, info->spare + info->read, size);
6de58e62
SRRH
4863 if (ret == size) {
4864 size = -EFAULT;
4865 goto out_unlock;
4866 }
2dc5d12b
SR
4867 size -= ret;
4868
2cadf913
SR
4869 *ppos += size;
4870 info->read += size;
4871
6de58e62
SRRH
4872 out_unlock:
4873 mutex_unlock(&trace_types_lock);
4874
2cadf913
SR
4875 return size;
4876}
4877
4878static int tracing_buffers_release(struct inode *inode, struct file *file)
4879{
4880 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 4881 struct trace_iterator *iter = &info->iter;
2cadf913 4882
a695cb58
SRRH
4883 mutex_lock(&trace_types_lock);
4884
4885 WARN_ON(!iter->tr->ref);
4886 iter->tr->ref--;
2cadf913 4887
ddd538f3 4888 if (info->spare)
12883efb 4889 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
4890 kfree(info);
4891
a695cb58
SRRH
4892 mutex_unlock(&trace_types_lock);
4893
2cadf913
SR
4894 return 0;
4895}
4896
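/*
 * Each page spliced to userspace carries a buffer_ref. The ring
 * buffer page is only handed back via ring_buffer_free_read_page()
 * once the last reference is dropped.
 */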
4897struct buffer_ref {
4898 struct ring_buffer *buffer;
4899 void *page;
4900 int ref;
4901};
4902
4903static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4904 struct pipe_buffer *buf)
4905{
4906 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4907
4908 if (--ref->ref)
4909 return;
4910
4911 ring_buffer_free_read_page(ref->buffer, ref->page);
4912 kfree(ref);
4913 buf->private = 0;
4914}
4915
2cadf913
SR
4916static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4917 struct pipe_buffer *buf)
4918{
4919 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4920
4921 ref->ref++;
4922}
4923
4924/* Pipe buffer operations for a buffer. */
28dfef8f 4925static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913
SR
4926 .can_merge = 0,
4927 .map = generic_pipe_buf_map,
4928 .unmap = generic_pipe_buf_unmap,
4929 .confirm = generic_pipe_buf_confirm,
4930 .release = buffer_pipe_buf_release,
d55cb6cf 4931 .steal = generic_pipe_buf_steal,
2cadf913
SR
4932 .get = buffer_pipe_buf_get,
4933};
4934
4935/*
4936 * Callback from splice_to_pipe(); releases any pages left at the
4937 * end of the spd in case we errored out while filling the pipe.
4938 */
4939static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4940{
4941 struct buffer_ref *ref =
4942 (struct buffer_ref *)spd->partial[i].private;
4943
4944 if (--ref->ref)
4945 return;
4946
4947 ring_buffer_free_read_page(ref->buffer, ref->page);
4948 kfree(ref);
4949 spd->partial[i].private = 0;
4950}
4951
4952static ssize_t
4953tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4954 struct pipe_inode_info *pipe, size_t len,
4955 unsigned int flags)
4956{
4957 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 4958 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
4959 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4960 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 4961 struct splice_pipe_desc spd = {
35f3d14d
JA
4962 .pages = pages_def,
4963 .partial = partial_def,
047fe360 4964 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
4965 .flags = flags,
4966 .ops = &buffer_pipe_buf_ops,
4967 .spd_release = buffer_spd_release,
4968 };
4969 struct buffer_ref *ref;
93459c6c 4970 int entries, size, i;
6de58e62 4971 ssize_t ret;
2cadf913 4972
6de58e62
SRRH
4973 mutex_lock(&trace_types_lock);
4974
4975#ifdef CONFIG_TRACER_MAX_TRACE
4976 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4977 ret = -EBUSY;
4978 goto out;
4979 }
4980#endif
4981
4982 if (splice_grow_spd(pipe, &spd)) {
4983 ret = -ENOMEM;
4984 goto out;
4985 }
35f3d14d 4986
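	/*
	 * Data is spliced as whole ring buffer pages: the file position
	 * must be page aligned, and the length is rounded down to a page
	 * multiple (anything shorter than one page is rejected).
	 */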
93cfb3c9 4987 if (*ppos & (PAGE_SIZE - 1)) {
35f3d14d
JA
4988 ret = -EINVAL;
4989 goto out;
93cfb3c9
LJ
4990 }
4991
4992 if (len & (PAGE_SIZE - 1)) {
35f3d14d
JA
4993 if (len < PAGE_SIZE) {
4994 ret = -EINVAL;
4995 goto out;
4996 }
93cfb3c9
LJ
4997 len &= PAGE_MASK;
4998 }
4999
cc60cdc9
SR
5000 again:
5001 trace_access_lock(iter->cpu_file);
12883efb 5002 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5003
35f3d14d 5004 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5005 struct page *page;
5006 int r;
5007
5008 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5009 if (!ref)
5010 break;
5011
7267fa68 5012 ref->ref = 1;
12883efb 5013 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5014 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913
SR
5015 if (!ref->page) {
5016 kfree(ref);
5017 break;
5018 }
5019
5020 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5021 len, iter->cpu_file, 1);
2cadf913 5022 if (r < 0) {
7ea59064 5023 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5024 kfree(ref);
5025 break;
5026 }
5027
5028 /*
5029		 * Zero out any leftover data; this is going
5030		 * to user land.
5031 */
5032 size = ring_buffer_page_len(ref->page);
5033 if (size < PAGE_SIZE)
5034 memset(ref->page + size, 0, PAGE_SIZE - size);
5035
5036 page = virt_to_page(ref->page);
5037
5038 spd.pages[i] = page;
5039 spd.partial[i].len = PAGE_SIZE;
5040 spd.partial[i].offset = 0;
5041 spd.partial[i].private = (unsigned long)ref;
5042 spd.nr_pages++;
93cfb3c9 5043 *ppos += PAGE_SIZE;
93459c6c 5044
12883efb 5045 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5046 }
5047
cc60cdc9 5048 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5049 spd.nr_pages = i;
5050
5051 /* did we read anything? */
5052 if (!spd.nr_pages) {
cc60cdc9 5053 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
2cadf913 5054 ret = -EAGAIN;
cc60cdc9
SR
5055 goto out;
5056 }
6de58e62 5057 mutex_unlock(&trace_types_lock);
b627344f 5058 iter->trace->wait_pipe(iter);
6de58e62 5059 mutex_lock(&trace_types_lock);
cc60cdc9
SR
5060 if (signal_pending(current)) {
5061 ret = -EINTR;
5062 goto out;
5063 }
5064 goto again;
2cadf913
SR
5065 }
5066
5067 ret = splice_to_pipe(pipe, &spd);
047fe360 5068 splice_shrink_spd(&spd);
35f3d14d 5069out:
6de58e62
SRRH
5070 mutex_unlock(&trace_types_lock);
5071
2cadf913
SR
5072 return ret;
5073}
5074
5075static const struct file_operations tracing_buffers_fops = {
5076 .open = tracing_buffers_open,
5077 .read = tracing_buffers_read,
cc60cdc9 5078 .poll = tracing_buffers_poll,
2cadf913
SR
5079 .release = tracing_buffers_release,
5080 .splice_read = tracing_buffers_splice_read,
5081 .llseek = no_llseek,
5082};
5083
c8d77183
SR
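/*
 * "stats" read: per-cpu ring buffer statistics (entries, overruns,
 * commit overruns, bytes, event timestamps, dropped and read events).
 */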
5084static ssize_t
5085tracing_stats_read(struct file *filp, char __user *ubuf,
5086 size_t count, loff_t *ppos)
5087{
2b6080f2
SR
5088 struct trace_cpu *tc = filp->private_data;
5089 struct trace_array *tr = tc->tr;
12883efb 5090 struct trace_buffer *trace_buf = &tr->trace_buffer;
c8d77183
SR
5091 struct trace_seq *s;
5092 unsigned long cnt;
c64e148a
VN
5093 unsigned long long t;
5094 unsigned long usec_rem;
2b6080f2 5095 int cpu = tc->cpu;
c8d77183 5096
e4f2d10f 5097 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5098 if (!s)
a646365c 5099 return -ENOMEM;
c8d77183
SR
5100
5101 trace_seq_init(s);
5102
12883efb 5103 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5104 trace_seq_printf(s, "entries: %ld\n", cnt);
5105
12883efb 5106 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5107 trace_seq_printf(s, "overrun: %ld\n", cnt);
5108
12883efb 5109 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5110 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5111
12883efb 5112 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5113 trace_seq_printf(s, "bytes: %ld\n", cnt);
5114
11043d8b
YY
5115 if (trace_clocks[trace_clock_id].in_ns) {
5116 /* local or global for trace_clock */
12883efb 5117 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5118 usec_rem = do_div(t, USEC_PER_SEC);
5119 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5120 t, usec_rem);
5121
12883efb 5122 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5123 usec_rem = do_div(t, USEC_PER_SEC);
5124 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5125 } else {
5126 /* counter or tsc mode for trace_clock */
5127 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5128 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5129
11043d8b 5130 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5131 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5132 }
c64e148a 5133
12883efb 5134 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5135 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5136
12883efb 5137 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5138 trace_seq_printf(s, "read events: %ld\n", cnt);
5139
c8d77183
SR
5140 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5141
5142 kfree(s);
5143
5144 return count;
5145}
5146
5147static const struct file_operations tracing_stats_fops = {
5148 .open = tracing_open_generic,
5149 .read = tracing_stats_read,
b444786f 5150 .llseek = generic_file_llseek,
c8d77183
SR
5151};
5152
bc0c38d1
SR
5153#ifdef CONFIG_DYNAMIC_FTRACE
5154
b807c3d0
SR
5155int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5156{
5157 return 0;
5158}
5159
bc0c38d1 5160static ssize_t
b807c3d0 5161tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5162 size_t cnt, loff_t *ppos)
5163{
a26a2a27
SR
5164 static char ftrace_dyn_info_buffer[1024];
5165 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5166 unsigned long *p = filp->private_data;
b807c3d0 5167 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5168 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5169 int r;
5170
b807c3d0
SR
5171 mutex_lock(&dyn_info_mutex);
5172 r = sprintf(buf, "%ld ", *p);
4bf39a94 5173
a26a2a27 5174 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5175 buf[r++] = '\n';
5176
5177 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5178
5179 mutex_unlock(&dyn_info_mutex);
5180
5181 return r;
bc0c38d1
SR
5182}
5183
5e2336a0 5184static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5185 .open = tracing_open_generic,
b807c3d0 5186 .read = tracing_read_dyn_info,
b444786f 5187 .llseek = generic_file_llseek,
bc0c38d1 5188};
77fd5c15 5189#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5190
77fd5c15
SRRH
5191#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5192static void
5193ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5194{
5195 tracing_snapshot();
5196}
bc0c38d1 5197
77fd5c15
SRRH
5198static void
5199ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5200{
77fd5c15
SRRH
5201 unsigned long *count = (long *)data;
5202
5203 if (!*count)
5204 return;
bc0c38d1 5205
77fd5c15
SRRH
5206 if (*count != -1)
5207 (*count)--;
5208
5209 tracing_snapshot();
5210}
5211
5212static int
5213ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5214 struct ftrace_probe_ops *ops, void *data)
5215{
5216 long count = (long)data;
5217
5218 seq_printf(m, "%ps:", (void *)ip);
5219
5220 seq_printf(m, "snapshot");
5221
5222 if (count == -1)
5223 seq_printf(m, ":unlimited\n");
5224 else
5225 seq_printf(m, ":count=%ld\n", count);
5226
5227 return 0;
5228}
5229
5230static struct ftrace_probe_ops snapshot_probe_ops = {
5231 .func = ftrace_snapshot,
5232 .print = ftrace_snapshot_print,
5233};
5234
5235static struct ftrace_probe_ops snapshot_count_probe_ops = {
5236 .func = ftrace_count_snapshot,
5237 .print = ftrace_snapshot_print,
5238};
5239
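/*
 * Parses the "snapshot" function command, i.e.
 *   echo '<function>:snapshot[:count]' > set_ftrace_filter
 * A leading '!' on the glob unregisters the probe instead.
 */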
5240static int
5241ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5242 char *glob, char *cmd, char *param, int enable)
5243{
5244 struct ftrace_probe_ops *ops;
5245 void *count = (void *)-1;
5246 char *number;
5247 int ret;
5248
5249 /* hash funcs only work with set_ftrace_filter */
5250 if (!enable)
5251 return -EINVAL;
5252
5253 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5254
5255 if (glob[0] == '!') {
5256 unregister_ftrace_function_probe_func(glob+1, ops);
5257 return 0;
5258 }
5259
5260 if (!param)
5261 goto out_reg;
5262
5263 number = strsep(&param, ":");
5264
5265 if (!strlen(number))
5266 goto out_reg;
5267
5268 /*
5269 * We use the callback data field (which is a pointer)
5270 * as our counter.
5271 */
5272 ret = kstrtoul(number, 0, (unsigned long *)&count);
5273 if (ret)
5274 return ret;
5275
5276 out_reg:
5277 ret = register_ftrace_function_probe(glob, ops, count);
5278
5279 if (ret >= 0)
5280 alloc_snapshot(&global_trace);
5281
5282 return ret < 0 ? ret : 0;
5283}
5284
5285static struct ftrace_func_command ftrace_snapshot_cmd = {
5286 .name = "snapshot",
5287 .func = ftrace_trace_snapshot_callback,
5288};
5289
5290static int register_snapshot_cmd(void)
5291{
5292 return register_ftrace_command(&ftrace_snapshot_cmd);
5293}
5294#else
5295static inline int register_snapshot_cmd(void) { return 0; }
5296#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 5297
2b6080f2 5298struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
bc0c38d1 5299{
2b6080f2
SR
5300 if (tr->dir)
5301 return tr->dir;
bc0c38d1 5302
3e1f60b8
FW
5303 if (!debugfs_initialized())
5304 return NULL;
5305
2b6080f2
SR
5306 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5307 tr->dir = debugfs_create_dir("tracing", NULL);
bc0c38d1 5308
687c878a
J
5309 if (!tr->dir)
5310 pr_warn_once("Could not create debugfs directory 'tracing'\n");
bc0c38d1 5311
2b6080f2 5312 return tr->dir;
bc0c38d1
SR
5313}
5314
2b6080f2
SR
5315struct dentry *tracing_init_dentry(void)
5316{
5317 return tracing_init_dentry_tr(&global_trace);
5318}
b04cc6b1 5319
2b6080f2 5320static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 5321{
b04cc6b1
FW
5322 struct dentry *d_tracer;
5323
2b6080f2
SR
5324 if (tr->percpu_dir)
5325 return tr->percpu_dir;
b04cc6b1 5326
2b6080f2 5327 d_tracer = tracing_init_dentry_tr(tr);
b04cc6b1
FW
5328 if (!d_tracer)
5329 return NULL;
5330
2b6080f2 5331 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
b04cc6b1 5332
2b6080f2
SR
5333 WARN_ONCE(!tr->percpu_dir,
5334 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 5335
2b6080f2 5336 return tr->percpu_dir;
b04cc6b1
FW
5337}
5338
2b6080f2
SR
5339static void
5340tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 5341{
12883efb 5342 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
2b6080f2 5343 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 5344 struct dentry *d_cpu;
dd49a38c 5345 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 5346
0a3d7ce7
NK
5347 if (!d_percpu)
5348 return;
5349
dd49a38c 5350 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8656e7a2
FW
5351 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5352 if (!d_cpu) {
5353 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5354 return;
5355 }
b04cc6b1 5356
8656e7a2 5357 /* per cpu trace_pipe */
5452af66 5358 trace_create_file("trace_pipe", 0444, d_cpu,
2b6080f2 5359 (void *)&data->trace_cpu, &tracing_pipe_fops);
b04cc6b1
FW
5360
5361 /* per cpu trace */
5452af66 5362 trace_create_file("trace", 0644, d_cpu,
2b6080f2 5363 (void *)&data->trace_cpu, &tracing_fops);
7f96f93f 5364
5452af66 5365 trace_create_file("trace_pipe_raw", 0444, d_cpu,
2b6080f2 5366 (void *)&data->trace_cpu, &tracing_buffers_fops);
7f96f93f 5367
c8d77183 5368 trace_create_file("stats", 0444, d_cpu,
2b6080f2 5369 (void *)&data->trace_cpu, &tracing_stats_fops);
438ced17
VN
5370
5371 trace_create_file("buffer_size_kb", 0444, d_cpu,
2b6080f2 5372 (void *)&data->trace_cpu, &tracing_entries_fops);
f1affcaa
SRRH
5373
5374#ifdef CONFIG_TRACER_SNAPSHOT
5375 trace_create_file("snapshot", 0644, d_cpu,
5376 (void *)&data->trace_cpu, &snapshot_fops);
6de58e62
SRRH
5377
5378 trace_create_file("snapshot_raw", 0444, d_cpu,
5379 (void *)&data->trace_cpu, &snapshot_raw_fops);
f1affcaa 5380#endif
b04cc6b1
FW
5381}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
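
/*
 * Userspace flips a tracer-specific option by writing "0" or "1" to its
 * file under the options directory, e.g. (path assumes debugfs mounted
 * at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/<option>
 *
 * Note the !val passed to __set_tracer_option() above: its final
 * argument acts as a negate flag, so writing 1 sets the option bit and
 * writing 0 clears it.
 */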

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}
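
/*
 * Sketch of the intended pairing (this is how the tracer-switching code
 * in this file uses these helpers when the current tracer changes):
 *
 *	destroy_trace_option_files(topts);
 *	topts = create_trace_option_files(tr, t);
 *
 * The outgoing tracer's option files are removed before the incoming
 * tracer's options are exposed; both helpers tolerate NULL.
 */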

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	char buf[64];
	int r;

	if (buffer)
		r = ring_buffer_record_is_on(buffer);
	else
		r = 0;

	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			ring_buffer_record_on(buffer);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			ring_buffer_record_off(buffer);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.llseek = default_llseek,
};
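
/*
 * These fops back the "tracing_on" file created in init_tracer_debugfs()
 * below; recording can be toggled from userspace without losing buffer
 * contents, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# pause recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# resume recording
 */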

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
	}
}

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	init_trace_buffers(tr, buf);

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
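
/*
 * Note: when a boot-time snapshot was not requested, the max/snapshot
 * buffer above is allocated at a minimal size (1) and is only grown to
 * match the main buffer when a snapshot is actually taken (e.g. via the
 * "snapshot" file).
 */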

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	/* Holder for file callbacks */
	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	tr->trace_cpu.tr = tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret)
		goto out_free_tr;

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
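
/*
 * From userspace, an instance is created simply by making a directory
 * under instances/, e.g.:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * The mkdir is routed through instance_mkdir() below and lands here;
 * "foo" is just an example name.
 */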

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	event_trace_del_tracer(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
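
/*
 * Removal mirrors creation:
 *
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * instance_delete() returns -EBUSY (and the rmdir fails) while the
 * instance still has open references.
 */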

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  (void *)&tr->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  (void *)&tr->trace_cpu, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  (void *)&tr->trace_cpu, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  (void *)&tr->trace_cpu, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  &global_trace, &tracing_cpumask_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			  &global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  &global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
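
/*
 * With the files above in place, a minimal session looks like:
 *
 *	cd /sys/kernel/debug/tracing
 *	cat available_tracers
 *	echo nop > current_tracer
 *	cat trace
 */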

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
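
/*
 * ftrace_dump_on_oops is typically armed from the kernel command line:
 *
 *	ftrace_dump_on_oops		dump every CPU's buffer on oops
 *	ftrace_dump_on_oops=orig_cpu	dump only the oopsing CPU's buffer
 */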

/*
 * printk is limited to a maximum of 1024 bytes; we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify it if we
 * decide to change what log level the ftrace dump should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We print all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
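
/*
 * Because it is exported, ftrace_dump() can also be called directly from
 * module or debugging code when the system is wedged: ftrace_dump(DUMP_ALL)
 * dumps every CPU's buffer, ftrace_dump(DUMP_ORIG) only the calling CPU's.
 * Either way, tracing is left disabled afterwards (see tracing_off()
 * above).
 */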

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	/* Holder for file callbacks */
	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	global_trace.trace_cpu.tr = &global_trace;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
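
/*
 * trace_boot_options is seeded from the "trace_options=" kernel command
 * line parameter, a comma-separated list consumed by the strsep() loop
 * above, e.g.:
 *
 *	trace_options=stacktrace,sym-addr
 *
 * (both names are examples; see the trace_options[] table for the full
 * set)
 */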

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer lives in an init section
	 * that is freed after boot. This function runs at late_initcall
	 * time: if the boot tracer was never registered, clear the
	 * pointer so that a later registration does not access memory
	 * that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);