/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_t __read_mostly	tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu_mask(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(char *buf);

static int __init set_ftrace(char *str)
{
	tracing_set_tracer(str);
	return 1;
}
__setup("ftrace", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

cycle_t ftrace_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(cpu);
	ring_buffer_normalize_time_stamp(cpu, &ts);
	return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See the irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/* function tracing enabled */
int				ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify the allocating of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries cannot be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
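
/*
 * Example (illustrative, not part of the original file): both of the
 * boot-time knobs above can be given on the kernel command line, e.g.
 *
 *	trace_buf_size=1048576 ftrace_dump_on_oops
 *
 * which sizes the per-CPU trace buffers at boot and arranges for the
 * buffers to be dumped to the console if an oops or panic happens.
 */
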
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"ftrace_printk",
	"ftrace_preempt",
	NULL
};

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
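
/*
 * Illustrative sketch (not part of the original file): a tracer's
 * print_line callback typically builds its output through the
 * trace_seq helpers above.  "example_print_line" is a hypothetical
 * name; the compiled code in this file does not use it.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	/* A zero return means the page-sized buffer filled up. */
	if (!trace_seq_printf(s, "cpu=%d ts=%llu\n",
			      iter->cpu, (unsigned long long)iter->ts))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#endif
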
/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

#define MAX_MEMHEX_BYTES	8
#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)

static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret)
		return -EFAULT;

	s->readpos += len;
	return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}
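
/*
 * Illustrative sketch (not part of the original file): a latency tracer
 * such as irqsoff calls update_max_tr() when it sees a new worst case,
 * with interrupts already disabled as the WARN_ON_ONCE() above expects.
 * "example_check_new_max" and "delta" are hypothetical names.
 */
#if 0
static void example_check_new_max(struct trace_array *tr, int cpu,
				  unsigned long delta)
{
	if (delta > tracing_max_latency) {
		tracing_max_latency = delta;
		update_max_tr(tr, current, cpu);
	}
}
#endif
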
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}
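
/*
 * Illustrative sketch (not part of the original file): the smallest
 * possible plugin tracer.  A real tracer fills in more callbacks;
 * "example_nop_trace" and its init/reset functions are hypothetical.
 */
#if 0
static void example_nop_trace_init(struct trace_array *tr)
{
}

static void example_nop_trace_reset(struct trace_array *tr)
{
}

static struct tracer example_nop_trace __read_mostly = {
	.name	= "example-nop",
	.init	= example_nop_trace_init,
	.reset	= example_nop_trace_reset,
};

static int __init init_example_nop_trace(void)
{
	return register_tracer(&example_nop_trace);
}
device_initcall(init_example_nop_trace);
#endif
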
void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count)
		goto out;

	if (trace_stop_count < 0) {
		/* Someone screwed up their debugging */
		WARN_ON_ONCE(1);
		trace_stop_count = 0;
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
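
/*
 * Illustrative sketch (not part of the original file): tracing_stop()
 * and tracing_start() nest via trace_stop_count, so a caller that
 * briefly needs the ring buffers quiescent simply brackets its work
 * with the pair.  "example_with_tracing_paused" is a hypothetical name.
 */
#if 0
static void example_with_tracing_paused(void)
{
	tracing_stop();
	/* ... inspect or copy the ring buffer contents here ... */
	tracing_start();
}
#endif
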
void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_FN;
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
			       struct trace_array_cpu *data,
			       unsigned long flags,
			       int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, pc);
	entry->ent.type = TRACE_SPECIAL;
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);

	trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_CTX;
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_WAKE;
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);

	trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
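
/*
 * Illustrative sketch (not part of the original file): ftrace_special()
 * is meant for ad-hoc debugging.  Sprinkling calls like the one below
 * into a suspect code path drops TRACE_SPECIAL entries (rendered as
 * "# arg1 arg2 arg3") into the trace without defining a new entry type.
 * "example_mark_point" is a hypothetical name.
 */
#if 0
static void example_mark_point(unsigned long seq, unsigned long addr)
{
	ftrace_special(seq, addr, (unsigned long)current->pid);
}
#endif
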
#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter, iter->cpu);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace) {
		mutex_unlock(&trace_types_lock);
		return NULL;
	}

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		for_each_tracing_cpu(cpu) {
			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		}

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	mutex_unlock(&trace_types_lock);
}
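
/*
 * Illustrative sketch (not part of the original file): s_start, s_next
 * and s_stop above are consumed through a struct seq_operations table
 * together with an s_show callback defined later in this file; the
 * wiring looks roughly like this.
 */
#if 0
static const struct seq_operations example_tracer_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};
#endif
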
76094a2c 1233#ifdef CONFIG_KRETPROBES
b3aa5577 1234static inline const char *kretprobed(const char *name)
76094a2c 1235{
b3aa5577
SR
1236 static const char tramp_name[] = "kretprobe_trampoline";
1237 int size = sizeof(tramp_name);
1238
1239 if (strncmp(tramp_name, name, size) == 0)
1240 return "[unknown/kretprobe'd]";
1241 return name;
76094a2c
AS
1242}
1243#else
b3aa5577 1244static inline const char *kretprobed(const char *name)
76094a2c 1245{
b3aa5577 1246 return name;
76094a2c
AS
1247}
1248#endif /* CONFIG_KRETPROBES */
1249
b3806b43 1250static int
214023c3 1251seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
bc0c38d1
SR
1252{
1253#ifdef CONFIG_KALLSYMS
1254 char str[KSYM_SYMBOL_LEN];
b3aa5577 1255 const char *name;
bc0c38d1
SR
1256
1257 kallsyms_lookup(address, NULL, NULL, NULL, str);
1258
b3aa5577
SR
1259 name = kretprobed(str);
1260
1261 return trace_seq_printf(s, fmt, name);
bc0c38d1 1262#endif
b3806b43 1263 return 1;
bc0c38d1
SR
1264}
1265
b3806b43 1266static int
214023c3
SR
1267seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1268 unsigned long address)
bc0c38d1
SR
1269{
1270#ifdef CONFIG_KALLSYMS
1271 char str[KSYM_SYMBOL_LEN];
b3aa5577 1272 const char *name;
bc0c38d1
SR
1273
1274 sprint_symbol(str, address);
b3aa5577
SR
1275 name = kretprobed(str);
1276
1277 return trace_seq_printf(s, fmt, name);
bc0c38d1 1278#endif
b3806b43 1279 return 1;
bc0c38d1
SR
1280}
1281
1282#ifndef CONFIG_64BIT
1283# define IP_FMT "%08lx"
1284#else
1285# define IP_FMT "%016lx"
1286#endif
1287
e309b41d 1288static int
214023c3 1289seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
bc0c38d1 1290{
b3806b43
SR
1291 int ret;
1292
1293 if (!ip)
1294 return trace_seq_printf(s, "0");
bc0c38d1
SR
1295
1296 if (sym_flags & TRACE_ITER_SYM_OFFSET)
b3806b43 1297 ret = seq_print_sym_offset(s, "%s", ip);
bc0c38d1 1298 else
b3806b43
SR
1299 ret = seq_print_sym_short(s, "%s", ip);
1300
1301 if (!ret)
1302 return 0;
bc0c38d1
SR
1303
1304 if (sym_flags & TRACE_ITER_SYM_ADDR)
b3806b43
SR
1305 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1306 return ret;
bc0c38d1
SR
1307}
1308
e309b41d 1309static void print_lat_help_header(struct seq_file *m)
bc0c38d1 1310{
a6168353
ME
1311 seq_puts(m, "# _------=> CPU# \n");
1312 seq_puts(m, "# / _-----=> irqs-off \n");
1313 seq_puts(m, "# | / _----=> need-resched \n");
1314 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1315 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1316 seq_puts(m, "# |||| / \n");
1317 seq_puts(m, "# ||||| delay \n");
1318 seq_puts(m, "# cmd pid ||||| time | caller \n");
1319 seq_puts(m, "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
1320}
1321
e309b41d 1322static void print_func_help_header(struct seq_file *m)
bc0c38d1 1323{
a6168353
ME
1324 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1325 seq_puts(m, "# | | | | |\n");
bc0c38d1
SR
1326}
1327
1328
e309b41d 1329static void
bc0c38d1
SR
1330print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1331{
1332 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1333 struct trace_array *tr = iter->tr;
1334 struct trace_array_cpu *data = tr->data[tr->cpu];
1335 struct tracer *type = current_trace;
3928a8a2
SR
1336 unsigned long total;
1337 unsigned long entries;
bc0c38d1
SR
1338 const char *name = "preemption";
1339
1340 if (type)
1341 name = type->name;
1342
3928a8a2
SR
1343 entries = ring_buffer_entries(iter->tr->buffer);
1344 total = entries +
1345 ring_buffer_overruns(iter->tr->buffer);
bc0c38d1
SR
1346
1347 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1348 name, UTS_RELEASE);
1349 seq_puts(m, "-----------------------------------"
1350 "---------------------------------\n");
1351 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
1352 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 1353 nsecs_to_usecs(data->saved_latency),
bc0c38d1 1354 entries,
4c11d7ae 1355 total,
bc0c38d1
SR
1356 tr->cpu,
1357#if defined(CONFIG_PREEMPT_NONE)
1358 "server",
1359#elif defined(CONFIG_PREEMPT_VOLUNTARY)
1360 "desktop",
b5c21b45 1361#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
1362 "preempt",
1363#else
1364 "unknown",
1365#endif
1366 /* These are reserved for later use */
1367 0, 0, 0, 0);
1368#ifdef CONFIG_SMP
1369 seq_printf(m, " #P:%d)\n", num_online_cpus());
1370#else
1371 seq_puts(m, ")\n");
1372#endif
1373 seq_puts(m, " -----------------\n");
1374 seq_printf(m, " | task: %.16s-%d "
1375 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1376 data->comm, data->pid, data->uid, data->nice,
1377 data->policy, data->rt_priority);
1378 seq_puts(m, " -----------------\n");
1379
1380 if (data->critical_start) {
1381 seq_puts(m, " => started at: ");
214023c3
SR
1382 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1383 trace_print_seq(m, &iter->seq);
bc0c38d1 1384 seq_puts(m, "\n => ended at: ");
214023c3
SR
1385 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1386 trace_print_seq(m, &iter->seq);
bc0c38d1
SR
1387 seq_puts(m, "\n");
1388 }
1389
1390 seq_puts(m, "\n");
1391}
1392
e309b41d 1393static void
214023c3 1394lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
bc0c38d1
SR
1395{
1396 int hardirq, softirq;
1397 char *comm;
1398
777e208d 1399 comm = trace_find_cmdline(entry->pid);
bc0c38d1 1400
777e208d 1401 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
a6168353 1402 trace_seq_printf(s, "%3d", cpu);
214023c3 1403 trace_seq_printf(s, "%c%c",
9244489a
SR
1404 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
1405 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
777e208d 1406 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
bc0c38d1 1407
777e208d
SR
1408 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
1409 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
afc2abc0 1410 if (hardirq && softirq) {
214023c3 1411 trace_seq_putc(s, 'H');
afc2abc0
IM
1412 } else {
1413 if (hardirq) {
214023c3 1414 trace_seq_putc(s, 'h');
afc2abc0 1415 } else {
bc0c38d1 1416 if (softirq)
214023c3 1417 trace_seq_putc(s, 's');
bc0c38d1 1418 else
214023c3 1419 trace_seq_putc(s, '.');
bc0c38d1
SR
1420 }
1421 }
1422
777e208d
SR
1423 if (entry->preempt_count)
1424 trace_seq_printf(s, "%x", entry->preempt_count);
bc0c38d1 1425 else
214023c3 1426 trace_seq_puts(s, ".");
bc0c38d1
SR
1427}
1428
1429unsigned long preempt_mark_thresh = 100;
1430
e309b41d 1431static void
3928a8a2 1432lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
bc0c38d1
SR
1433 unsigned long rel_usecs)
1434{
214023c3 1435 trace_seq_printf(s, " %4lldus", abs_usecs);
bc0c38d1 1436 if (rel_usecs > preempt_mark_thresh)
214023c3 1437 trace_seq_puts(s, "!: ");
bc0c38d1 1438 else if (rel_usecs > 1)
214023c3 1439 trace_seq_puts(s, "+: ");
bc0c38d1 1440 else
214023c3 1441 trace_seq_puts(s, " : ");
bc0c38d1
SR
1442}
1443
1444static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1445
fc5e27ae
PP
1446/*
1447 * The message is supposed to contain an ending newline.
1448 * If the printing stops prematurely, try to add a newline of our own.
1449 */
1450void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
dd0e545f 1451{
dd0e545f 1452 struct trace_entry *ent;
777e208d 1453 struct trace_field_cont *cont;
fc5e27ae 1454 bool ok = true;
dd0e545f 1455
3928a8a2 1456 ent = peek_next_entry(iter, iter->cpu, NULL);
dd0e545f
SR
1457 if (!ent || ent->type != TRACE_CONT) {
1458 trace_seq_putc(s, '\n');
1459 return;
1460 }
1461
1462 do {
777e208d 1463 cont = (struct trace_field_cont *)ent;
fc5e27ae 1464 if (ok)
777e208d 1465 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
d769041f
SR
1466
1467 ftrace_disable_cpu();
1468
1469 if (iter->buffer_iter[iter->cpu])
1470 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1471 else
1472 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
1473
1474 ftrace_enable_cpu();
1475
3928a8a2 1476 ent = peek_next_entry(iter, iter->cpu, NULL);
dd0e545f 1477 } while (ent && ent->type == TRACE_CONT);
fc5e27ae
PP
1478
1479 if (!ok)
1480 trace_seq_putc(s, '\n');
dd0e545f
SR
1481}
1482
a309720c
SR
1483static void test_cpu_buff_start(struct trace_iterator *iter)
1484{
1485 struct trace_seq *s = &iter->seq;
1486
1487 if (cpu_isset(iter->cpu, iter->started))
1488 return;
1489
1490 cpu_set(iter->cpu, iter->started);
1491 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1492}
1493
2c4f035f 1494static enum print_line_t
214023c3 1495print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
bc0c38d1 1496{
214023c3 1497 struct trace_seq *s = &iter->seq;
bc0c38d1 1498 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
3928a8a2 1499 struct trace_entry *next_entry;
bc0c38d1
SR
1500 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1501 struct trace_entry *entry = iter->ent;
1502 unsigned long abs_usecs;
1503 unsigned long rel_usecs;
3928a8a2 1504 u64 next_ts;
bc0c38d1 1505 char *comm;
bac524d3 1506 int S, T;
86387f7e 1507 int i;
d17d9691 1508 unsigned state;
bc0c38d1 1509
dd0e545f 1510 if (entry->type == TRACE_CONT)
2c4f035f 1511 return TRACE_TYPE_HANDLED;
dd0e545f 1512
a309720c
SR
1513 test_cpu_buff_start(iter);
1514
3928a8a2
SR
1515 next_entry = find_next_entry(iter, NULL, &next_ts);
1516 if (!next_entry)
1517 next_ts = iter->ts;
1518 rel_usecs = ns2usecs(next_ts - iter->ts);
1519 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
bc0c38d1
SR
1520
1521 if (verbose) {
777e208d 1522 comm = trace_find_cmdline(entry->pid);
a6168353 1523 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
214023c3
SR
1524 " %ld.%03ldms (+%ld.%03ldms): ",
1525 comm,
777e208d
SR
1526 entry->pid, cpu, entry->flags,
1527 entry->preempt_count, trace_idx,
3928a8a2 1528 ns2usecs(iter->ts),
214023c3
SR
1529 abs_usecs/1000,
1530 abs_usecs % 1000, rel_usecs/1000,
1531 rel_usecs % 1000);
bc0c38d1 1532 } else {
f29c73fe
IM
1533 lat_print_generic(s, entry, cpu);
1534 lat_print_timestamp(s, abs_usecs, rel_usecs);
bc0c38d1
SR
1535 }
1536 switch (entry->type) {
777e208d 1537 case TRACE_FN: {
7104f300
SR
1538 struct ftrace_entry *field;
1539
1540 trace_assign_type(field, entry);
777e208d
SR
1541
1542 seq_print_ip_sym(s, field->ip, sym_flags);
214023c3 1543 trace_seq_puts(s, " (");
b3aa5577 1544 seq_print_ip_sym(s, field->parent_ip, sym_flags);
214023c3 1545 trace_seq_puts(s, ")\n");
bc0c38d1 1546 break;
777e208d 1547 }
bc0c38d1 1548 case TRACE_CTX:
777e208d 1549 case TRACE_WAKE: {
7104f300
SR
1550 struct ctx_switch_entry *field;
1551
1552 trace_assign_type(field, entry);
777e208d
SR
1553
1554 T = field->next_state < sizeof(state_to_char) ?
1555 state_to_char[field->next_state] : 'X';
bac524d3 1556
777e208d
SR
1557 state = field->prev_state ?
1558 __ffs(field->prev_state) + 1 : 0;
d17d9691 1559 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
777e208d 1560 comm = trace_find_cmdline(field->next_pid);
80b5e940 1561 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
777e208d
SR
1562 field->prev_pid,
1563 field->prev_prio,
57422797 1564 S, entry->type == TRACE_CTX ? "==>" : " +",
777e208d
SR
1565 field->next_cpu,
1566 field->next_pid,
1567 field->next_prio,
bac524d3 1568 T, comm);
bc0c38d1 1569 break;
777e208d
SR
1570 }
1571 case TRACE_SPECIAL: {
7104f300
SR
1572 struct special_entry *field;
1573
1574 trace_assign_type(field, entry);
777e208d 1575
88a4216c 1576 trace_seq_printf(s, "# %ld %ld %ld\n",
777e208d
SR
1577 field->arg1,
1578 field->arg2,
1579 field->arg3);
f0a920d5 1580 break;
777e208d
SR
1581 }
1582 case TRACE_STACK: {
7104f300
SR
1583 struct stack_entry *field;
1584
1585 trace_assign_type(field, entry);
777e208d 1586
86387f7e
IM
1587 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1588 if (i)
1589 trace_seq_puts(s, " <= ");
777e208d 1590 seq_print_ip_sym(s, field->caller[i], sym_flags);
86387f7e
IM
1591 }
1592 trace_seq_puts(s, "\n");
1593 break;
777e208d
SR
1594 }
1595 case TRACE_PRINT: {
7104f300
SR
1596 struct print_entry *field;
1597
1598 trace_assign_type(field, entry);
777e208d
SR
1599
1600 seq_print_ip_sym(s, field->ip, sym_flags);
1601 trace_seq_printf(s, ": %s", field->buf);
1602 if (entry->flags & TRACE_FLAG_CONT)
dd0e545f
SR
1603 trace_seq_print_cont(s, iter);
1604 break;
777e208d 1605 }
89b2f978 1606 default:
214023c3 1607 trace_seq_printf(s, "Unknown type %d\n", entry->type);
bc0c38d1 1608 }
2c4f035f 1609 return TRACE_TYPE_HANDLED;
bc0c38d1
SR
1610}
1611
2c4f035f 1612static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 1613{
214023c3 1614 struct trace_seq *s = &iter->seq;
bc0c38d1 1615 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 1616 struct trace_entry *entry;
bc0c38d1
SR
1617 unsigned long usec_rem;
1618 unsigned long long t;
1619 unsigned long secs;
1620 char *comm;
b3806b43 1621 int ret;
bac524d3 1622 int S, T;
86387f7e 1623 int i;
bc0c38d1 1624
4e3c3333 1625 entry = iter->ent;
dd0e545f
SR
1626
1627 if (entry->type == TRACE_CONT)
2c4f035f 1628 return TRACE_TYPE_HANDLED;
dd0e545f 1629
a309720c
SR
1630 test_cpu_buff_start(iter);
1631
777e208d 1632 comm = trace_find_cmdline(iter->ent->pid);
bc0c38d1 1633
3928a8a2 1634 t = ns2usecs(iter->ts);
bc0c38d1
SR
1635 usec_rem = do_div(t, 1000000ULL);
1636 secs = (unsigned long)t;
1637
777e208d 1638 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
f29c73fe 1639 if (!ret)
2c4f035f 1640 return TRACE_TYPE_PARTIAL_LINE;
a6168353 1641 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
f29c73fe 1642 if (!ret)
2c4f035f 1643 return TRACE_TYPE_PARTIAL_LINE;
f29c73fe
IM
1644 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1645 if (!ret)
2c4f035f 1646 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1
SR
1647
1648 switch (entry->type) {
777e208d 1649 case TRACE_FN: {
7104f300
SR
1650 struct ftrace_entry *field;
1651
1652 trace_assign_type(field, entry);
777e208d
SR
1653
1654 ret = seq_print_ip_sym(s, field->ip, sym_flags);
b3806b43 1655 if (!ret)
2c4f035f 1656 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1 1657 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
777e208d 1658 field->parent_ip) {
b3806b43
SR
1659 ret = trace_seq_printf(s, " <-");
1660 if (!ret)
2c4f035f 1661 return TRACE_TYPE_PARTIAL_LINE;
b3aa5577
SR
1662 ret = seq_print_ip_sym(s,
1663 field->parent_ip,
1664 sym_flags);
b3806b43 1665 if (!ret)
2c4f035f 1666 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1 1667 }
b3806b43
SR
1668 ret = trace_seq_printf(s, "\n");
1669 if (!ret)
2c4f035f 1670 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1 1671 break;
777e208d 1672 }
bc0c38d1 1673 case TRACE_CTX:
777e208d 1674 case TRACE_WAKE: {
7104f300
SR
1675 struct ctx_switch_entry *field;
1676
1677 trace_assign_type(field, entry);
777e208d
SR
1678
1679 S = field->prev_state < sizeof(state_to_char) ?
1680 state_to_char[field->prev_state] : 'X';
1681 T = field->next_state < sizeof(state_to_char) ?
1682 state_to_char[field->next_state] : 'X';
80b5e940 1683 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
777e208d
SR
1684 field->prev_pid,
1685 field->prev_prio,
b3806b43 1686 S,
57422797 1687 entry->type == TRACE_CTX ? "==>" : " +",
777e208d
SR
1688 field->next_cpu,
1689 field->next_pid,
1690 field->next_prio,
bac524d3 1691 T);
b3806b43 1692 if (!ret)
2c4f035f 1693 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1 1694 break;
777e208d
SR
1695 }
1696 case TRACE_SPECIAL: {
7104f300
SR
1697 struct special_entry *field;
1698
1699 trace_assign_type(field, entry);
777e208d 1700
88a4216c 1701 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
777e208d
SR
1702 field->arg1,
1703 field->arg2,
1704 field->arg3);
f0a920d5 1705 if (!ret)
2c4f035f 1706 return TRACE_TYPE_PARTIAL_LINE;
f0a920d5 1707 break;
777e208d
SR
1708 }
1709 case TRACE_STACK: {
7104f300
SR
1710 struct stack_entry *field;
1711
1712 trace_assign_type(field, entry);
777e208d 1713
86387f7e
IM
1714 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1715 if (i) {
1716 ret = trace_seq_puts(s, " <= ");
1717 if (!ret)
2c4f035f 1718 return TRACE_TYPE_PARTIAL_LINE;
86387f7e 1719 }
777e208d 1720 ret = seq_print_ip_sym(s, field->caller[i],
86387f7e
IM
1721 sym_flags);
1722 if (!ret)
2c4f035f 1723 return TRACE_TYPE_PARTIAL_LINE;
86387f7e
IM
1724 }
1725 ret = trace_seq_puts(s, "\n");
1726 if (!ret)
2c4f035f 1727 return TRACE_TYPE_PARTIAL_LINE;
86387f7e 1728 break;
777e208d
SR
1729 }
1730 case TRACE_PRINT: {
7104f300
SR
1731 struct print_entry *field;
1732
1733 trace_assign_type(field, entry);
777e208d
SR
1734
1735 seq_print_ip_sym(s, field->ip, sym_flags);
1736 trace_seq_printf(s, ": %s", field->buf);
1737 if (entry->flags & TRACE_FLAG_CONT)
dd0e545f
SR
1738 trace_seq_print_cont(s, iter);
1739 break;
bc0c38d1 1740 }
777e208d 1741 }
2c4f035f 1742 return TRACE_TYPE_HANDLED;
bc0c38d1
SR
1743}
1744
2c4f035f 1745static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
1746{
1747 struct trace_seq *s = &iter->seq;
1748 struct trace_entry *entry;
1749 int ret;
bac524d3 1750 int S, T;
f9896bf3
IM
1751
1752 entry = iter->ent;
dd0e545f
SR
1753
1754 if (entry->type == TRACE_CONT)
2c4f035f 1755 return TRACE_TYPE_HANDLED;
dd0e545f 1756
f9896bf3 1757 ret = trace_seq_printf(s, "%d %d %llu ",
777e208d 1758 entry->pid, iter->cpu, iter->ts);
f9896bf3 1759 if (!ret)
2c4f035f 1760 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3
IM
1761
1762 switch (entry->type) {
777e208d 1763 case TRACE_FN: {
7104f300
SR
1764 struct ftrace_entry *field;
1765
1766 trace_assign_type(field, entry);
777e208d 1767
f9896bf3 1768 ret = trace_seq_printf(s, "%x %x\n",
777e208d
SR
1769 field->ip,
1770 field->parent_ip);
f9896bf3 1771 if (!ret)
2c4f035f 1772 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 1773 break;
777e208d 1774 }
f9896bf3 1775 case TRACE_CTX:
777e208d 1776 case TRACE_WAKE: {
7104f300
SR
1777 struct ctx_switch_entry *field;
1778
1779 trace_assign_type(field, entry);
777e208d
SR
1780
1781 S = field->prev_state < sizeof(state_to_char) ?
1782 state_to_char[field->prev_state] : 'X';
1783 T = field->next_state < sizeof(state_to_char) ?
1784 state_to_char[field->next_state] : 'X';
57422797
IM
1785 if (entry->type == TRACE_WAKE)
1786 S = '+';
80b5e940 1787 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
777e208d
SR
1788 field->prev_pid,
1789 field->prev_prio,
f9896bf3 1790 S,
777e208d
SR
1791 field->next_cpu,
1792 field->next_pid,
1793 field->next_prio,
bac524d3 1794 T);
f9896bf3 1795 if (!ret)
2c4f035f 1796 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 1797 break;
777e208d 1798 }
f0a920d5 1799 case TRACE_SPECIAL:
777e208d 1800 case TRACE_STACK: {
7104f300
SR
1801 struct special_entry *field;
1802
1803 trace_assign_type(field, entry);
777e208d 1804
88a4216c 1805 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
777e208d
SR
1806 field->arg1,
1807 field->arg2,
1808 field->arg3);
f0a920d5 1809 if (!ret)
2c4f035f 1810 return TRACE_TYPE_PARTIAL_LINE;
f0a920d5 1811 break;
777e208d
SR
1812 }
1813 case TRACE_PRINT: {
7104f300
SR
1814 struct print_entry *field;
1815
1816 trace_assign_type(field, entry);
777e208d
SR
1817
1818 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
1819 if (entry->flags & TRACE_FLAG_CONT)
dd0e545f
SR
1820 trace_seq_print_cont(s, iter);
1821 break;
f9896bf3 1822 }
777e208d 1823 }
2c4f035f 1824 return TRACE_TYPE_HANDLED;
f9896bf3
IM
1825}
1826
cb0f12aa
IM
1827#define SEQ_PUT_FIELD_RET(s, x) \
1828do { \
1829 if (!trace_seq_putmem(s, &(x), sizeof(x))) \
1830 return 0; \
1831} while (0)
1832
5e3ca0ec
IM
1833#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1834do { \
ad0a3b68 1835 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
5e3ca0ec
IM
1836 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1837 return 0; \
1838} while (0)
1839
2c4f035f 1840static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
1841{
1842 struct trace_seq *s = &iter->seq;
1843 unsigned char newline = '\n';
1844 struct trace_entry *entry;
bac524d3 1845 int S, T;
5e3ca0ec
IM
1846
1847 entry = iter->ent;
dd0e545f
SR
1848
1849 if (entry->type == TRACE_CONT)
2c4f035f 1850 return TRACE_TYPE_HANDLED;
dd0e545f 1851
777e208d 1852 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
5e3ca0ec 1853 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
3928a8a2 1854 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
5e3ca0ec
IM
1855
1856 switch (entry->type) {
777e208d 1857 case TRACE_FN: {
7104f300
SR
1858 struct ftrace_entry *field;
1859
1860 trace_assign_type(field, entry);
777e208d
SR
1861
1862 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1863 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
5e3ca0ec 1864 break;
777e208d 1865 }
5e3ca0ec 1866 case TRACE_CTX:
777e208d 1867 case TRACE_WAKE: {
7104f300
SR
1868 struct ctx_switch_entry *field;
1869
1870 trace_assign_type(field, entry);
777e208d
SR
1871
1872 S = field->prev_state < sizeof(state_to_char) ?
1873 state_to_char[field->prev_state] : 'X';
1874 T = field->next_state < sizeof(state_to_char) ?
1875 state_to_char[field->next_state] : 'X';
57422797
IM
1876 if (entry->type == TRACE_WAKE)
1877 S = '+';
777e208d
SR
1878 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1879 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
5e3ca0ec 1880 SEQ_PUT_HEX_FIELD_RET(s, S);
777e208d
SR
1881 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1882 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1883 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
bac524d3 1884 SEQ_PUT_HEX_FIELD_RET(s, T);
5e3ca0ec 1885 break;
777e208d 1886 }
5e3ca0ec 1887 case TRACE_SPECIAL:
777e208d 1888 case TRACE_STACK: {
7104f300
SR
1889 struct special_entry *field;
1890
1891 trace_assign_type(field, entry);
777e208d
SR
1892
1893 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1894 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1895 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
5e3ca0ec
IM
1896 break;
1897 }
777e208d 1898 }
5e3ca0ec
IM
1899 SEQ_PUT_FIELD_RET(s, newline);
1900
2c4f035f 1901 return TRACE_TYPE_HANDLED;
5e3ca0ec
IM
1902}
1903
2c4f035f 1904static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
1905{
1906 struct trace_seq *s = &iter->seq;
1907 struct trace_entry *entry;
1908
1909 entry = iter->ent;
dd0e545f
SR
1910
1911 if (entry->type == TRACE_CONT)
2c4f035f 1912 return TRACE_TYPE_HANDLED;
dd0e545f 1913
777e208d 1914 SEQ_PUT_FIELD_RET(s, entry->pid);
072ba498 1915 SEQ_PUT_FIELD_RET(s, entry->cpu);
3928a8a2 1916 SEQ_PUT_FIELD_RET(s, iter->ts);
cb0f12aa
IM
1917
1918 switch (entry->type) {
777e208d 1919 case TRACE_FN: {
7104f300
SR
1920 struct ftrace_entry *field;
1921
1922 trace_assign_type(field, entry);
777e208d
SR
1923
1924 SEQ_PUT_FIELD_RET(s, field->ip);
1925 SEQ_PUT_FIELD_RET(s, field->parent_ip);
cb0f12aa 1926 break;
777e208d
SR
1927 }
1928 case TRACE_CTX: {
7104f300
SR
1929 struct ctx_switch_entry *field;
1930
1931 trace_assign_type(field, entry);
777e208d
SR
1932
1933 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1934 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1935 SEQ_PUT_FIELD_RET(s, field->prev_state);
1936 SEQ_PUT_FIELD_RET(s, field->next_pid);
1937 SEQ_PUT_FIELD_RET(s, field->next_prio);
1938 SEQ_PUT_FIELD_RET(s, field->next_state);
cb0f12aa 1939 break;
777e208d 1940 }
f0a920d5 1941 case TRACE_SPECIAL:
777e208d 1942 case TRACE_STACK: {
7104f300
SR
1943 struct special_entry *field;
1944
1945 trace_assign_type(field, entry);
777e208d
SR
1946
1947 SEQ_PUT_FIELD_RET(s, field->arg1);
1948 SEQ_PUT_FIELD_RET(s, field->arg2);
1949 SEQ_PUT_FIELD_RET(s, field->arg3);
f0a920d5 1950 break;
cb0f12aa 1951 }
777e208d 1952 }
cb0f12aa
IM
1953 return TRACE_TYPE_HANDLED;
1954}
1955
bc0c38d1
SR
1956static int trace_empty(struct trace_iterator *iter)
1957{
bc0c38d1
SR
1958 int cpu;
1959
ab46428c 1960 for_each_tracing_cpu(cpu) {
d769041f
SR
1961 if (iter->buffer_iter[cpu]) {
1962 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1963 return 0;
1964 } else {
1965 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1966 return 0;
1967 }
bc0c38d1 1968 }
d769041f 1969
797d3712 1970 return 1;
bc0c38d1
SR
1971}
1972
2c4f035f 1973static enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 1974{
2c4f035f
FW
1975 enum print_line_t ret;
1976
1977 if (iter->trace && iter->trace->print_line) {
1978 ret = iter->trace->print_line(iter);
1979 if (ret != TRACE_TYPE_UNHANDLED)
1980 return ret;
1981 }
72829bc3 1982
cb0f12aa
IM
1983 if (trace_flags & TRACE_ITER_BIN)
1984 return print_bin_fmt(iter);
1985
5e3ca0ec
IM
1986 if (trace_flags & TRACE_ITER_HEX)
1987 return print_hex_fmt(iter);
1988
f9896bf3
IM
1989 if (trace_flags & TRACE_ITER_RAW)
1990 return print_raw_fmt(iter);
1991
1992 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
1993 return print_lat_fmt(iter, iter->idx, iter->cpu);
1994
1995 return print_trace_fmt(iter);
1996}
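
print_trace_line() picks an output format in a fixed priority order: a tracer-supplied print_line() hook wins, then the binary, hex and raw formats selected through iter_ctrl, then the latency format for latency-style files, and finally the default human-readable formatter. Assuming the iter_ctrl option strings follow the TRACE_ITER_BIN/HEX/RAW flag names (an assumption; the trace_options[] table lives elsewhere), switching formats would look like:

	# echo hex > /debug/tracing/iter_ctrl
	# cat /debug/tracing/trace
	# echo nohex > /debug/tracing/iter_ctrl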
1997
bc0c38d1
SR
1998static int s_show(struct seq_file *m, void *v)
1999{
2000 struct trace_iterator *iter = v;
2001
2002 if (iter->ent == NULL) {
2003 if (iter->tr) {
2004 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2005 seq_puts(m, "#\n");
2006 }
2007 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2008 /* print nothing if the buffers are empty */
2009 if (trace_empty(iter))
2010 return 0;
2011 print_trace_header(m, iter);
2012 if (!(trace_flags & TRACE_ITER_VERBOSE))
2013 print_lat_help_header(m);
2014 } else {
2015 if (!(trace_flags & TRACE_ITER_VERBOSE))
2016 print_func_help_header(m);
2017 }
2018 } else {
f9896bf3 2019 print_trace_line(iter);
214023c3 2020 trace_print_seq(m, &iter->seq);
bc0c38d1
SR
2021 }
2022
2023 return 0;
2024}
2025
2026static struct seq_operations tracer_seq_ops = {
4bf39a94
IM
2027 .start = s_start,
2028 .next = s_next,
2029 .stop = s_stop,
2030 .show = s_show,
bc0c38d1
SR
2031};
2032
e309b41d 2033static struct trace_iterator *
bc0c38d1
SR
2034__tracing_open(struct inode *inode, struct file *file, int *ret)
2035{
2036 struct trace_iterator *iter;
3928a8a2
SR
2037 struct seq_file *m;
2038 int cpu;
bc0c38d1 2039
60a11774
SR
2040 if (tracing_disabled) {
2041 *ret = -ENODEV;
2042 return NULL;
2043 }
2044
bc0c38d1
SR
2045 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2046 if (!iter) {
2047 *ret = -ENOMEM;
2048 goto out;
2049 }
2050
2051 mutex_lock(&trace_types_lock);
2052 if (current_trace && current_trace->print_max)
2053 iter->tr = &max_tr;
2054 else
2055 iter->tr = inode->i_private;
2056 iter->trace = current_trace;
2057 iter->pos = -1;
2058
3928a8a2 2059 for_each_tracing_cpu(cpu) {
d769041f 2060
3928a8a2
SR
2061 iter->buffer_iter[cpu] =
2062 ring_buffer_read_start(iter->tr->buffer, cpu);
d769041f 2063
3928a8a2
SR
2064 if (!iter->buffer_iter[cpu])
2065 goto fail_buffer;
2066 }
2067
bc0c38d1
SR
2068 /* TODO stop tracer */
2069 *ret = seq_open(file, &tracer_seq_ops);
3928a8a2
SR
2070 if (*ret)
2071 goto fail_buffer;
bc0c38d1 2072
3928a8a2
SR
2073 m = file->private_data;
2074 m->private = iter;
bc0c38d1 2075
3928a8a2 2076 /* stop the trace while dumping */
9036990d 2077 tracing_stop();
3928a8a2
SR
2078
2079 if (iter->trace && iter->trace->open)
2080 iter->trace->open(iter);
2081
bc0c38d1
SR
2082 mutex_unlock(&trace_types_lock);
2083
2084 out:
2085 return iter;
3928a8a2
SR
2086
2087 fail_buffer:
2088 for_each_tracing_cpu(cpu) {
2089 if (iter->buffer_iter[cpu])
2090 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2091 }
2092 mutex_unlock(&trace_types_lock);
2093
2094 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
2095}
2096
2097int tracing_open_generic(struct inode *inode, struct file *filp)
2098{
60a11774
SR
2099 if (tracing_disabled)
2100 return -ENODEV;
2101
bc0c38d1
SR
2102 filp->private_data = inode->i_private;
2103 return 0;
2104}
2105
2106int tracing_release(struct inode *inode, struct file *file)
2107{
2108 struct seq_file *m = (struct seq_file *)file->private_data;
2109 struct trace_iterator *iter = m->private;
3928a8a2 2110 int cpu;
bc0c38d1
SR
2111
2112 mutex_lock(&trace_types_lock);
3928a8a2
SR
2113 for_each_tracing_cpu(cpu) {
2114 if (iter->buffer_iter[cpu])
2115 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2116 }
2117
bc0c38d1
SR
2118 if (iter->trace && iter->trace->close)
2119 iter->trace->close(iter);
2120
2121 /* reenable tracing if it was previously enabled */
9036990d 2122 tracing_start();
bc0c38d1
SR
2123 mutex_unlock(&trace_types_lock);
2124
2125 seq_release(inode, file);
2126 kfree(iter);
2127 return 0;
2128}
2129
2130static int tracing_open(struct inode *inode, struct file *file)
2131{
2132 int ret;
2133
2134 __tracing_open(inode, file, &ret);
2135
2136 return ret;
2137}
2138
2139static int tracing_lt_open(struct inode *inode, struct file *file)
2140{
2141 struct trace_iterator *iter;
2142 int ret;
2143
2144 iter = __tracing_open(inode, file, &ret);
2145
2146 if (!ret)
2147 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2148
2149 return ret;
2150}
2151
2152
e309b41d 2153static void *
bc0c38d1
SR
2154t_next(struct seq_file *m, void *v, loff_t *pos)
2155{
2156 struct tracer *t = m->private;
2157
2158 (*pos)++;
2159
2160 if (t)
2161 t = t->next;
2162
2163 m->private = t;
2164
2165 return t;
2166}
2167
2168static void *t_start(struct seq_file *m, loff_t *pos)
2169{
2170 struct tracer *t = m->private;
2171 loff_t l = 0;
2172
2173 mutex_lock(&trace_types_lock);
2174 for (; t && l < *pos; t = t_next(m, t, &l))
2175 ;
2176
2177 return t;
2178}
2179
2180static void t_stop(struct seq_file *m, void *p)
2181{
2182 mutex_unlock(&trace_types_lock);
2183}
2184
2185static int t_show(struct seq_file *m, void *v)
2186{
2187 struct tracer *t = v;
2188
2189 if (!t)
2190 return 0;
2191
2192 seq_printf(m, "%s", t->name);
2193 if (t->next)
2194 seq_putc(m, ' ');
2195 else
2196 seq_putc(m, '\n');
2197
2198 return 0;
2199}
2200
2201static struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
2202 .start = t_start,
2203 .next = t_next,
2204 .stop = t_stop,
2205 .show = t_show,
bc0c38d1
SR
2206};
2207
2208static int show_traces_open(struct inode *inode, struct file *file)
2209{
2210 int ret;
2211
60a11774
SR
2212 if (tracing_disabled)
2213 return -ENODEV;
2214
bc0c38d1
SR
2215 ret = seq_open(file, &show_traces_seq_ops);
2216 if (!ret) {
2217 struct seq_file *m = file->private_data;
2218 m->private = trace_types;
2219 }
2220
2221 return ret;
2222}
2223
2224static struct file_operations tracing_fops = {
4bf39a94
IM
2225 .open = tracing_open,
2226 .read = seq_read,
2227 .llseek = seq_lseek,
2228 .release = tracing_release,
bc0c38d1
SR
2229};
2230
2231static struct file_operations tracing_lt_fops = {
4bf39a94
IM
2232 .open = tracing_lt_open,
2233 .read = seq_read,
2234 .llseek = seq_lseek,
2235 .release = tracing_release,
bc0c38d1
SR
2236};
2237
2238static struct file_operations show_traces_fops = {
c7078de1
IM
2239 .open = show_traces_open,
2240 .read = seq_read,
2241 .release = seq_release,
2242};
2243
36dfe925
IM
2244/*
2245 * Only trace on a CPU if the bitmask is set:
2246 */
2247static cpumask_t tracing_cpumask = CPU_MASK_ALL;
2248
2249/*
2250 * When tracing/tracing_cpumask is modified then this holds
2251 * the new bitmask we are about to install:
2252 */
2253static cpumask_t tracing_cpumask_new;
2254
2255/*
2256 * The tracer itself will not take this lock, but still we want
2257 * to provide a consistent cpumask to user-space:
2258 */
2259static DEFINE_MUTEX(tracing_cpumask_update_lock);
2260
2261/*
2262 * Temporary storage for the character representation of the
2263 * CPU bitmask (and one more byte for the newline):
2264 */
2265static char mask_str[NR_CPUS + 1];
2266
c7078de1
IM
2267static ssize_t
2268tracing_cpumask_read(struct file *filp, char __user *ubuf,
2269 size_t count, loff_t *ppos)
2270{
36dfe925 2271 int len;
c7078de1
IM
2272
2273 mutex_lock(&tracing_cpumask_update_lock);
36dfe925
IM
2274
2275 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2276 if (count - len < 2) {
2277 count = -EINVAL;
2278 goto out_err;
2279 }
2280 len += sprintf(mask_str + len, "\n");
2281 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2282
2283out_err:
c7078de1
IM
2284 mutex_unlock(&tracing_cpumask_update_lock);
2285
2286 return count;
2287}
2288
2289static ssize_t
2290tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2291 size_t count, loff_t *ppos)
2292{
36dfe925 2293 int err, cpu;
c7078de1
IM
2294
2295 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 2296 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 2297 if (err)
36dfe925
IM
2298 goto err_unlock;
2299
92205c23
SR
2300 raw_local_irq_disable();
2301 __raw_spin_lock(&ftrace_max_lock);
ab46428c 2302 for_each_tracing_cpu(cpu) {
36dfe925
IM
2303 /*
2304 * Increase/decrease the disabled counter if we are
2305 * about to flip a bit in the cpumask:
2306 */
2307 if (cpu_isset(cpu, tracing_cpumask) &&
2308 !cpu_isset(cpu, tracing_cpumask_new)) {
2309 atomic_inc(&global_trace.data[cpu]->disabled);
2310 }
2311 if (!cpu_isset(cpu, tracing_cpumask) &&
2312 cpu_isset(cpu, tracing_cpumask_new)) {
2313 atomic_dec(&global_trace.data[cpu]->disabled);
2314 }
2315 }
92205c23
SR
2316 __raw_spin_unlock(&ftrace_max_lock);
2317 raw_local_irq_enable();
36dfe925
IM
2318
2319 tracing_cpumask = tracing_cpumask_new;
2320
2321 mutex_unlock(&tracing_cpumask_update_lock);
c7078de1
IM
2322
2323 return count;
36dfe925
IM
2324
2325err_unlock:
2326 mutex_unlock(&tracing_cpumask_update_lock);
2327
2328 return err;
c7078de1
IM
2329}
2330
2331static struct file_operations tracing_cpumask_fops = {
2332 .open = tracing_open_generic,
2333 .read = tracing_cpumask_read,
2334 .write = tracing_cpumask_write,
bc0c38d1
SR
2335};
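
tracing_cpumask is read and written as a standard hex CPU mask (cpumask_scnprintf()/cpumask_parse_user()), and the write path bumps the per-CPU disabled counters so tracing stops on CPUs that leave the mask. A hedged example on a hypothetical 4-CPU box:

	# cat /debug/tracing/tracing_cpumask
	f
	# echo 3 > /debug/tracing/tracing_cpumask	(trace only CPUs 0 and 1)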
2336
2337static ssize_t
2338tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2339 size_t cnt, loff_t *ppos)
2340{
2341 char *buf;
2342 int r = 0;
2343 int len = 0;
2344 int i;
2345
2346 /* calculate max size */
2347 for (i = 0; trace_options[i]; i++) {
2348 len += strlen(trace_options[i]);
2349 len += 3; /* "no" and space */
2350 }
2351
2352 /* +2 for \n and \0 */
2353 buf = kmalloc(len + 2, GFP_KERNEL);
2354 if (!buf)
2355 return -ENOMEM;
2356
2357 for (i = 0; trace_options[i]; i++) {
2358 if (trace_flags & (1 << i))
2359 r += sprintf(buf + r, "%s ", trace_options[i]);
2360 else
2361 r += sprintf(buf + r, "no%s ", trace_options[i]);
2362 }
2363
2364 r += sprintf(buf + r, "\n");
2365 WARN_ON(r >= len + 2);
2366
36dfe925 2367 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2368
2369 kfree(buf);
2370
2371 return r;
2372}
2373
2374static ssize_t
2375tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2376 size_t cnt, loff_t *ppos)
2377{
2378 char buf[64];
2379 char *cmp = buf;
2380 int neg = 0;
2381 int i;
2382
cffae437
SR
2383 if (cnt >= sizeof(buf))
2384 return -EINVAL;
bc0c38d1
SR
2385
2386 if (copy_from_user(&buf, ubuf, cnt))
2387 return -EFAULT;
2388
2389 buf[cnt] = 0;
2390
2391 if (strncmp(buf, "no", 2) == 0) {
2392 neg = 1;
2393 cmp += 2;
2394 }
2395
2396 for (i = 0; trace_options[i]; i++) {
2397 int len = strlen(trace_options[i]);
2398
2399 if (strncmp(cmp, trace_options[i], len) == 0) {
2400 if (neg)
2401 trace_flags &= ~(1 << i);
2402 else
2403 trace_flags |= (1 << i);
2404 break;
2405 }
2406 }
442e544c
IM
2407 /*
2408 * If no option could be set, return an error:
2409 */
2410 if (!trace_options[i])
2411 return -EINVAL;
bc0c38d1
SR
2412
2413 filp->f_pos += cnt;
2414
2415 return cnt;
2416}
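
Each write to iter_ctrl toggles exactly one option: the bare name sets the flag, a "no" prefix clears it, and a string that matches no entry in trace_options[] is rejected with -EINVAL. Using option names already shown in the mini-HOWTO above:

	# echo print-parent > /debug/tracing/iter_ctrl
	# echo nosym-addr > /debug/tracing/iter_ctrl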
2417
2418static struct file_operations tracing_iter_fops = {
c7078de1
IM
2419 .open = tracing_open_generic,
2420 .read = tracing_iter_ctrl_read,
2421 .write = tracing_iter_ctrl_write,
bc0c38d1
SR
2422};
2423
7bd2f24c
IM
2424static const char readme_msg[] =
2425 "tracing mini-HOWTO:\n\n"
2426 "# mkdir /debug\n"
2427 "# mount -t debugfs nodev /debug\n\n"
2428 "# cat /debug/tracing/available_tracers\n"
2429 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2430 "# cat /debug/tracing/current_tracer\n"
2431 "none\n"
2432 "# echo sched_switch > /debug/tracing/current_tracer\n"
2433 "# cat /debug/tracing/current_tracer\n"
2434 "sched_switch\n"
2435 "# cat /debug/tracing/iter_ctrl\n"
2436 "noprint-parent nosym-offset nosym-addr noverbose\n"
2437 "# echo print-parent > /debug/tracing/iter_ctrl\n"
2438 "# echo 1 > /debug/tracing/tracing_enabled\n"
2439 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2440 "echo 0 > /debug/tracing/tracing_enabled\n"
2441;
2442
2443static ssize_t
2444tracing_readme_read(struct file *filp, char __user *ubuf,
2445 size_t cnt, loff_t *ppos)
2446{
2447 return simple_read_from_buffer(ubuf, cnt, ppos,
2448 readme_msg, strlen(readme_msg));
2449}
2450
2451static struct file_operations tracing_readme_fops = {
c7078de1
IM
2452 .open = tracing_open_generic,
2453 .read = tracing_readme_read,
7bd2f24c
IM
2454};
2455
bc0c38d1
SR
2456static ssize_t
2457tracing_ctrl_read(struct file *filp, char __user *ubuf,
2458 size_t cnt, loff_t *ppos)
2459{
bc0c38d1
SR
2460 char buf[64];
2461 int r;
2462
9036990d 2463 r = sprintf(buf, "%u\n", tracer_enabled);
4e3c3333 2464 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2465}
2466
2467static ssize_t
2468tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2469 size_t cnt, loff_t *ppos)
2470{
2471 struct trace_array *tr = filp->private_data;
bc0c38d1 2472 char buf[64];
c6caeeb1
SR
2473 long val;
2474 int ret;
bc0c38d1 2475
cffae437
SR
2476 if (cnt >= sizeof(buf))
2477 return -EINVAL;
bc0c38d1
SR
2478
2479 if (copy_from_user(&buf, ubuf, cnt))
2480 return -EFAULT;
2481
2482 buf[cnt] = 0;
2483
c6caeeb1
SR
2484 ret = strict_strtoul(buf, 10, &val);
2485 if (ret < 0)
2486 return ret;
bc0c38d1
SR
2487
2488 val = !!val;
2489
2490 mutex_lock(&trace_types_lock);
9036990d
SR
2491 if (tracer_enabled ^ val) {
2492 if (val) {
bc0c38d1 2493 tracer_enabled = 1;
9036990d
SR
2494 if (current_trace->start)
2495 current_trace->start(tr);
2496 tracing_start();
2497 } else {
bc0c38d1 2498 tracer_enabled = 0;
9036990d
SR
2499 tracing_stop();
2500 if (current_trace->stop)
2501 current_trace->stop(tr);
2502 }
bc0c38d1
SR
2503 }
2504 mutex_unlock(&trace_types_lock);
2505
2506 filp->f_pos += cnt;
2507
2508 return cnt;
2509}
2510
2511static ssize_t
2512tracing_set_trace_read(struct file *filp, char __user *ubuf,
2513 size_t cnt, loff_t *ppos)
2514{
2515 char buf[max_tracer_type_len+2];
2516 int r;
2517
2518 mutex_lock(&trace_types_lock);
2519 if (current_trace)
2520 r = sprintf(buf, "%s\n", current_trace->name);
2521 else
2522 r = sprintf(buf, "\n");
2523 mutex_unlock(&trace_types_lock);
2524
4bf39a94 2525 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2526}
2527
d9e54076 2528static int tracing_set_tracer(char *buf)
bc0c38d1
SR
2529{
2530 struct trace_array *tr = &global_trace;
2531 struct tracer *t;
d9e54076 2532 int ret = 0;
bc0c38d1
SR
2533
2534 mutex_lock(&trace_types_lock);
2535 for (t = trace_types; t; t = t->next) {
2536 if (strcmp(t->name, buf) == 0)
2537 break;
2538 }
c2931e05
FW
2539 if (!t) {
2540 ret = -EINVAL;
2541 goto out;
2542 }
2543 if (t == current_trace)
bc0c38d1
SR
2544 goto out;
2545
2546 if (current_trace && current_trace->reset)
2547 current_trace->reset(tr);
2548
2549 current_trace = t;
2550 if (t->init)
2551 t->init(tr);
2552
2553 out:
2554 mutex_unlock(&trace_types_lock);
2555
d9e54076
PZ
2556 return ret;
2557}
2558
2559static ssize_t
2560tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2561 size_t cnt, loff_t *ppos)
2562{
2563 char buf[max_tracer_type_len+1];
2564 int i;
2565 ssize_t ret;
2566
2567 if (cnt > max_tracer_type_len)
2568 cnt = max_tracer_type_len;
2569
2570 if (copy_from_user(&buf, ubuf, cnt))
2571 return -EFAULT;
2572
2573 buf[cnt] = 0;
2574
2575 /* strip trailing whitespace. */
2576 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2577 buf[i] = 0;
2578
2579 ret = tracing_set_tracer(buf);
2580 if (!ret)
2581 ret = cnt;
2582
60063a66
SR
2583 if (ret > 0)
2584 filp->f_pos += ret;
bc0c38d1 2585
c2931e05 2586 return ret;
bc0c38d1
SR
2587}
2588
2589static ssize_t
2590tracing_max_lat_read(struct file *filp, char __user *ubuf,
2591 size_t cnt, loff_t *ppos)
2592{
2593 unsigned long *ptr = filp->private_data;
2594 char buf[64];
2595 int r;
2596
cffae437 2597 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 2598 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
2599 if (r > sizeof(buf))
2600 r = sizeof(buf);
4bf39a94 2601 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2602}
2603
2604static ssize_t
2605tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2606 size_t cnt, loff_t *ppos)
2607{
2608 long *ptr = filp->private_data;
bc0c38d1 2609 char buf[64];
c6caeeb1
SR
2610 long val;
2611 int ret;
bc0c38d1 2612
cffae437
SR
2613 if (cnt >= sizeof(buf))
2614 return -EINVAL;
bc0c38d1
SR
2615
2616 if (copy_from_user(&buf, ubuf, cnt))
2617 return -EFAULT;
2618
2619 buf[cnt] = 0;
2620
c6caeeb1
SR
2621 ret = strict_strtoul(buf, 10, &val);
2622 if (ret < 0)
2623 return ret;
bc0c38d1
SR
2624
2625 *ptr = val * 1000;
2626
2627 return cnt;
2628}
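
The latency files are written in microseconds but stored in nanoseconds: the write handler multiplies by 1000 and the read side converts back with nsecs_to_usecs(). For the tracing_max_latency and tracing_thresh entries created further down, a hedged example:

	# echo 100 > /debug/tracing/tracing_thresh	(report anything slower than 100 usecs)
	# echo 0 > /debug/tracing/tracing_max_latency	(reset the recorded maximum)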
2629
b3806b43
SR
2630static atomic_t tracing_reader;
2631
2632static int tracing_open_pipe(struct inode *inode, struct file *filp)
2633{
2634 struct trace_iterator *iter;
2635
2636 if (tracing_disabled)
2637 return -ENODEV;
2638
2639 /* We only allow one reader of the pipe */
2640 if (atomic_inc_return(&tracing_reader) != 1) {
2641 atomic_dec(&tracing_reader);
2642 return -EBUSY;
2643 }
2644
2645 /* create a buffer to store the information to pass to userspace */
2646 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2647 if (!iter)
2648 return -ENOMEM;
2649
107bad8b 2650 mutex_lock(&trace_types_lock);
a309720c
SR
2651
2652 /* trace pipe does not show start of buffer */
2653 cpus_setall(iter->started);
2654
b3806b43 2655 iter->tr = &global_trace;
72829bc3 2656 iter->trace = current_trace;
b3806b43
SR
2657 filp->private_data = iter;
2658
107bad8b
SR
2659 if (iter->trace->pipe_open)
2660 iter->trace->pipe_open(iter);
2661 mutex_unlock(&trace_types_lock);
2662
b3806b43
SR
2663 return 0;
2664}
2665
2666static int tracing_release_pipe(struct inode *inode, struct file *file)
2667{
2668 struct trace_iterator *iter = file->private_data;
2669
2670 kfree(iter);
2671 atomic_dec(&tracing_reader);
2672
2673 return 0;
2674}
2675
2a2cc8f7
SSP
2676static unsigned int
2677tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2678{
2679 struct trace_iterator *iter = filp->private_data;
2680
2681 if (trace_flags & TRACE_ITER_BLOCK) {
2682 /*
2683 * Always select as readable when in blocking mode
2684 */
2685 return POLLIN | POLLRDNORM;
afc2abc0 2686 } else {
2a2cc8f7
SSP
2687 if (!trace_empty(iter))
2688 return POLLIN | POLLRDNORM;
2689 poll_wait(filp, &trace_wait, poll_table);
2690 if (!trace_empty(iter))
2691 return POLLIN | POLLRDNORM;
2692
2693 return 0;
2694 }
2695}
2696
b3806b43
SR
2697/*
2698 * Consumer reader.
2699 */
2700static ssize_t
2701tracing_read_pipe(struct file *filp, char __user *ubuf,
2702 size_t cnt, loff_t *ppos)
2703{
2704 struct trace_iterator *iter = filp->private_data;
6c6c2796 2705 ssize_t sret;
b3806b43
SR
2706
2707 /* return any leftover data */
6c6c2796
PP
2708 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2709 if (sret != -EBUSY)
2710 return sret;
b3806b43 2711
6c6c2796 2712 trace_seq_reset(&iter->seq);
b3806b43 2713
107bad8b
SR
2714 mutex_lock(&trace_types_lock);
2715 if (iter->trace->read) {
6c6c2796
PP
2716 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2717 if (sret)
107bad8b 2718 goto out;
107bad8b
SR
2719 }
2720
9ff4b974
PP
2721waitagain:
2722 sret = 0;
b3806b43 2723 while (trace_empty(iter)) {
2dc8f095 2724
107bad8b 2725 if ((filp->f_flags & O_NONBLOCK)) {
6c6c2796 2726 sret = -EAGAIN;
107bad8b
SR
2727 goto out;
2728 }
2dc8f095 2729
b3806b43
SR
2730 /*
2731 * This is a makeshift waitqueue. The reason we don't use
2732 * an actual wait queue is because:
2733 * 1) we only ever have one waiter
2734 * 2) the tracer traces all functions; we don't want
2735 * the overhead of calling wake_up and friends
2736 * (and tracing them too)
2737 * Anyway, this is a really primitive wakeup.
2738 */
2739 set_current_state(TASK_INTERRUPTIBLE);
2740 iter->tr->waiter = current;
2741
107bad8b
SR
2742 mutex_unlock(&trace_types_lock);
2743
9fe068e9
IM
2744 /* sleep for 100 msecs, and try again. */
2745 schedule_timeout(HZ/10);
b3806b43 2746
107bad8b
SR
2747 mutex_lock(&trace_types_lock);
2748
b3806b43
SR
2749 iter->tr->waiter = NULL;
2750
107bad8b 2751 if (signal_pending(current)) {
6c6c2796 2752 sret = -EINTR;
107bad8b
SR
2753 goto out;
2754 }
b3806b43 2755
84527997 2756 if (iter->trace != current_trace)
107bad8b 2757 goto out;
84527997 2758
b3806b43
SR
2759 /*
2760 * We block until we read something and tracing is disabled.
2761 * We still block if tracing is disabled, but we have never
2762 * read anything. This allows a user to cat this file, and
2763 * then enable tracing. But after we have read something,
2764 * we give an EOF when tracing is again disabled.
2765 *
2766 * iter->pos will be 0 if we haven't read anything.
2767 */
2768 if (!tracer_enabled && iter->pos)
2769 break;
2770
2771 continue;
2772 }
2773
2774 /* stop when tracing is finished */
2775 if (trace_empty(iter))
107bad8b 2776 goto out;
b3806b43
SR
2777
2778 if (cnt >= PAGE_SIZE)
2779 cnt = PAGE_SIZE - 1;
2780
53d0aa77 2781 /* reset all but tr, trace, and overruns */
53d0aa77
SR
2782 memset(&iter->seq, 0,
2783 sizeof(struct trace_iterator) -
2784 offsetof(struct trace_iterator, seq));
4823ed7e 2785 iter->pos = -1;
b3806b43 2786
088b1e42 2787 while (find_next_entry_inc(iter) != NULL) {
2c4f035f 2788 enum print_line_t ret;
088b1e42
SR
2789 int len = iter->seq.len;
2790
f9896bf3 2791 ret = print_trace_line(iter);
2c4f035f 2792 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42
SR
2793 /* don't print partial lines */
2794 iter->seq.len = len;
b3806b43 2795 break;
088b1e42 2796 }
b3806b43
SR
2797
2798 trace_consume(iter);
2799
2800 if (iter->seq.len >= cnt)
2801 break;
b3806b43
SR
2802 }
2803
b3806b43 2804 /* Now copy what we have to the user */
6c6c2796
PP
2805 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2806 if (iter->seq.readpos >= iter->seq.len)
b3806b43 2807 trace_seq_reset(&iter->seq);
9ff4b974
PP
2808
2809 /*
2810 * If there was nothing to send to the user, in spite of consuming
2811 * trace entries, go back to wait for more entries.
2812 */
6c6c2796 2813 if (sret == -EBUSY)
9ff4b974 2814 goto waitagain;
b3806b43 2815
107bad8b
SR
2816out:
2817 mutex_unlock(&trace_types_lock);
2818
6c6c2796 2819 return sret;
b3806b43
SR
2820}
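
tracing_read_pipe() is a consuming, blocking read: entries handed to user-space are removed from the ring buffer (trace_consume()), the call sleeps in 100 ms steps while the buffer is empty, and it returns EOF only once tracing is disabled after something has been read. A hedged user-space sketch, assuming the /debug mount point from the mini-HOWTO above:

	/* hypothetical consumer, not part of the kernel source */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/debug/tracing/trace_pipe", O_RDONLY);

		if (fd < 0) {
			perror("open trace_pipe");
			return 1;
		}
		/* blocks until entries arrive; what is read here is consumed */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}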
2821
a98a3c3f
SR
2822static ssize_t
2823tracing_entries_read(struct file *filp, char __user *ubuf,
2824 size_t cnt, loff_t *ppos)
2825{
2826 struct trace_array *tr = filp->private_data;
2827 char buf[64];
2828 int r;
2829
2830 r = sprintf(buf, "%lu\n", tr->entries);
2831 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2832}
2833
2834static ssize_t
2835tracing_entries_write(struct file *filp, const char __user *ubuf,
2836 size_t cnt, loff_t *ppos)
2837{
2838 unsigned long val;
2839 char buf[64];
bf5e6519 2840 int ret, cpu;
a98a3c3f 2841
cffae437
SR
2842 if (cnt >= sizeof(buf))
2843 return -EINVAL;
a98a3c3f
SR
2844
2845 if (copy_from_user(&buf, ubuf, cnt))
2846 return -EFAULT;
2847
2848 buf[cnt] = 0;
2849
c6caeeb1
SR
2850 ret = strict_strtoul(buf, 10, &val);
2851 if (ret < 0)
2852 return ret;
a98a3c3f
SR
2853
2854 /* must have at least 1 entry */
2855 if (!val)
2856 return -EINVAL;
2857
2858 mutex_lock(&trace_types_lock);
2859
c76f0694 2860 tracing_stop();
a98a3c3f 2861
bf5e6519
SR
2862 /* disable all cpu buffers */
2863 for_each_tracing_cpu(cpu) {
2864 if (global_trace.data[cpu])
2865 atomic_inc(&global_trace.data[cpu]->disabled);
2866 if (max_tr.data[cpu])
2867 atomic_inc(&max_tr.data[cpu]->disabled);
2868 }
2869
3928a8a2
SR
2870 if (val != global_trace.entries) {
2871 ret = ring_buffer_resize(global_trace.buffer, val);
2872 if (ret < 0) {
2873 cnt = ret;
3eefae99
SR
2874 goto out;
2875 }
2876
3928a8a2
SR
2877 ret = ring_buffer_resize(max_tr.buffer, val);
2878 if (ret < 0) {
2879 int r;
2880 cnt = ret;
2881 r = ring_buffer_resize(global_trace.buffer,
2882 global_trace.entries);
2883 if (r < 0) {
2884 /* AARGH! We are left with different
2885 * size max buffer!!!! */
2886 WARN_ON(1);
2887 tracing_disabled = 1;
a98a3c3f 2888 }
3928a8a2 2889 goto out;
a98a3c3f 2890 }
3eefae99 2891
3928a8a2 2892 global_trace.entries = val;
a98a3c3f
SR
2893 }
2894
2895 filp->f_pos += cnt;
2896
19384c03
SR
2897 /* If check pages failed, return ENOMEM */
2898 if (tracing_disabled)
2899 cnt = -ENOMEM;
a98a3c3f 2900 out:
bf5e6519
SR
2901 for_each_tracing_cpu(cpu) {
2902 if (global_trace.data[cpu])
2903 atomic_dec(&global_trace.data[cpu]->disabled);
2904 if (max_tr.data[cpu])
2905 atomic_dec(&max_tr.data[cpu]->disabled);
2906 }
2907
c76f0694 2908 tracing_start();
a98a3c3f
SR
2909 max_tr.entries = global_trace.entries;
2910 mutex_unlock(&trace_types_lock);
2911
2912 return cnt;
2913}
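
trace_entries resizes both the global and the max ring buffers to the requested number of entries; tracing is stopped and all per-CPU buffers are disabled for the duration of the resize. A hedged example:

	# cat /debug/tracing/trace_entries
	# echo 100000 > /debug/tracing/trace_entries	(resize both buffers to 100000 entries)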
2914
5bf9a1ee
PP
2915static int mark_printk(const char *fmt, ...)
2916{
2917 int ret;
2918 va_list args;
2919 va_start(args, fmt);
2920 ret = trace_vprintk(0, fmt, args);
2921 va_end(args);
2922 return ret;
2923}
2924
2925static ssize_t
2926tracing_mark_write(struct file *filp, const char __user *ubuf,
2927 size_t cnt, loff_t *fpos)
2928{
2929 char *buf;
2930 char *end;
5bf9a1ee 2931
c76f0694 2932 if (tracing_disabled)
5bf9a1ee
PP
2933 return -EINVAL;
2934
2935 if (cnt > TRACE_BUF_SIZE)
2936 cnt = TRACE_BUF_SIZE;
2937
2938 buf = kmalloc(cnt + 1, GFP_KERNEL);
2939 if (buf == NULL)
2940 return -ENOMEM;
2941
2942 if (copy_from_user(buf, ubuf, cnt)) {
2943 kfree(buf);
2944 return -EFAULT;
2945 }
2946
2947 /* Cut at the first NUL or newline. */
2948 buf[cnt] = '\0';
2949 end = strchr(buf, '\n');
2950 if (end)
2951 *end = '\0';
2952
2953 cnt = mark_printk("%s\n", buf);
2954 kfree(buf);
2955 *fpos += cnt;
2956
2957 return cnt;
2958}
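
trace_marker lets user-space drop its own annotations into the trace: the write path cuts the string at the first NUL or newline and feeds it through mark_printk()/trace_vprintk(), so it shows up inline as a TRACE_PRINT entry. A hedged example:

	# echo "hit the slow path" > /debug/tracing/trace_marker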
2959
bc0c38d1 2960static struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
2961 .open = tracing_open_generic,
2962 .read = tracing_max_lat_read,
2963 .write = tracing_max_lat_write,
bc0c38d1
SR
2964};
2965
2966static struct file_operations tracing_ctrl_fops = {
4bf39a94
IM
2967 .open = tracing_open_generic,
2968 .read = tracing_ctrl_read,
2969 .write = tracing_ctrl_write,
bc0c38d1
SR
2970};
2971
2972static struct file_operations set_tracer_fops = {
4bf39a94
IM
2973 .open = tracing_open_generic,
2974 .read = tracing_set_trace_read,
2975 .write = tracing_set_trace_write,
bc0c38d1
SR
2976};
2977
b3806b43 2978static struct file_operations tracing_pipe_fops = {
4bf39a94 2979 .open = tracing_open_pipe,
2a2cc8f7 2980 .poll = tracing_poll_pipe,
4bf39a94
IM
2981 .read = tracing_read_pipe,
2982 .release = tracing_release_pipe,
b3806b43
SR
2983};
2984
a98a3c3f
SR
2985static struct file_operations tracing_entries_fops = {
2986 .open = tracing_open_generic,
2987 .read = tracing_entries_read,
2988 .write = tracing_entries_write,
2989};
2990
5bf9a1ee 2991static struct file_operations tracing_mark_fops = {
43a15386 2992 .open = tracing_open_generic,
5bf9a1ee
PP
2993 .write = tracing_mark_write,
2994};
2995
bc0c38d1
SR
2996#ifdef CONFIG_DYNAMIC_FTRACE
2997
b807c3d0
SR
2998int __weak ftrace_arch_read_dyn_info(char *buf, int size)
2999{
3000 return 0;
3001}
3002
bc0c38d1 3003static ssize_t
b807c3d0 3004tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
3005 size_t cnt, loff_t *ppos)
3006{
a26a2a27
SR
3007 static char ftrace_dyn_info_buffer[1024];
3008 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 3009 unsigned long *p = filp->private_data;
b807c3d0 3010 char *buf = ftrace_dyn_info_buffer;
a26a2a27 3011 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
3012 int r;
3013
b807c3d0
SR
3014 mutex_lock(&dyn_info_mutex);
3015 r = sprintf(buf, "%ld ", *p);
4bf39a94 3016
a26a2a27 3017 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
3018 buf[r++] = '\n';
3019
3020 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3021
3022 mutex_unlock(&dyn_info_mutex);
3023
3024 return r;
bc0c38d1
SR
3025}
3026
b807c3d0 3027static struct file_operations tracing_dyn_info_fops = {
4bf39a94 3028 .open = tracing_open_generic,
b807c3d0 3029 .read = tracing_read_dyn_info,
bc0c38d1
SR
3030};
3031#endif
3032
3033static struct dentry *d_tracer;
3034
3035struct dentry *tracing_init_dentry(void)
3036{
3037 static int once;
3038
3039 if (d_tracer)
3040 return d_tracer;
3041
3042 d_tracer = debugfs_create_dir("tracing", NULL);
3043
3044 if (!d_tracer && !once) {
3045 once = 1;
3046 pr_warning("Could not create debugfs directory 'tracing'\n");
3047 return NULL;
3048 }
3049
3050 return d_tracer;
3051}
3052
60a11774
SR
3053#ifdef CONFIG_FTRACE_SELFTEST
3054/* Let selftest have access to static functions in this file */
3055#include "trace_selftest.c"
3056#endif
3057
b5ad384e 3058static __init int tracer_init_debugfs(void)
bc0c38d1
SR
3059{
3060 struct dentry *d_tracer;
3061 struct dentry *entry;
3062
3063 d_tracer = tracing_init_dentry();
3064
3065 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
3066 &global_trace, &tracing_ctrl_fops);
3067 if (!entry)
3068 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
3069
3070 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
3071 NULL, &tracing_iter_fops);
3072 if (!entry)
3073 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
3074
c7078de1
IM
3075 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
3076 NULL, &tracing_cpumask_fops);
3077 if (!entry)
3078 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
3079
bc0c38d1
SR
3080 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
3081 &global_trace, &tracing_lt_fops);
3082 if (!entry)
3083 pr_warning("Could not create debugfs 'latency_trace' entry\n");
3084
3085 entry = debugfs_create_file("trace", 0444, d_tracer,
3086 &global_trace, &tracing_fops);
3087 if (!entry)
3088 pr_warning("Could not create debugfs 'trace' entry\n");
3089
3090 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
3091 &global_trace, &show_traces_fops);
3092 if (!entry)
98a983aa 3093 pr_warning("Could not create debugfs 'available_tracers' entry\n");
bc0c38d1
SR
3094
3095 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
3096 &global_trace, &set_tracer_fops);
3097 if (!entry)
98a983aa 3098 pr_warning("Could not create debugfs 'current_tracer' entry\n");
bc0c38d1
SR
3099
3100 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
3101 &tracing_max_latency,
3102 &tracing_max_lat_fops);
3103 if (!entry)
3104 pr_warning("Could not create debugfs "
3105 "'tracing_max_latency' entry\n");
3106
3107 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
3108 &tracing_thresh, &tracing_max_lat_fops);
3109 if (!entry)
3110 pr_warning("Could not create debugfs "
98a983aa 3111 "'tracing_thresh' entry\n");
7bd2f24c
IM
3112 entry = debugfs_create_file("README", 0644, d_tracer,
3113 NULL, &tracing_readme_fops);
3114 if (!entry)
3115 pr_warning("Could not create debugfs 'README' entry\n");
3116
b3806b43
SR
3117 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
3118 NULL, &tracing_pipe_fops);
3119 if (!entry)
3120 pr_warning("Could not create debugfs "
98a983aa 3121 "'trace_pipe' entry\n");
bc0c38d1 3122
a98a3c3f
SR
3123 entry = debugfs_create_file("trace_entries", 0644, d_tracer,
3124 &global_trace, &tracing_entries_fops);
3125 if (!entry)
3126 pr_warning("Could not create debugfs "
98a983aa 3127 "'trace_entries' entry\n");
a98a3c3f 3128
5bf9a1ee
PP
3129 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
3130 NULL, &tracing_mark_fops);
3131 if (!entry)
3132 pr_warning("Could not create debugfs "
3133 "'trace_marker' entry\n");
3134
bc0c38d1
SR
3135#ifdef CONFIG_DYNAMIC_FTRACE
3136 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3137 &ftrace_update_tot_cnt,
b807c3d0 3138 &tracing_dyn_info_fops);
bc0c38d1
SR
3139 if (!entry)
3140 pr_warning("Could not create debugfs "
3141 "'dyn_ftrace_total_info' entry\n");
3142#endif
d618b3e6
IM
3143#ifdef CONFIG_SYSPROF_TRACER
3144 init_tracer_sysprof_debugfs(d_tracer);
3145#endif
b5ad384e 3146 return 0;
bc0c38d1
SR
3147}
3148
801fe400 3149int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
dd0e545f 3150{
dd0e545f
SR
3151 static DEFINE_SPINLOCK(trace_buf_lock);
3152 static char trace_buf[TRACE_BUF_SIZE];
f09ce573 3153
3928a8a2 3154 struct ring_buffer_event *event;
f09ce573 3155 struct trace_array *tr = &global_trace;
dd0e545f 3156 struct trace_array_cpu *data;
777e208d 3157 struct print_entry *entry;
3928a8a2 3158 unsigned long flags, irq_flags;
38697053 3159 int cpu, len = 0, size, pc;
dd0e545f 3160
c76f0694 3161 if (tracing_disabled)
dd0e545f
SR
3162 return 0;
3163
38697053
SR
3164 pc = preempt_count();
3165 preempt_disable_notrace();
dd0e545f
SR
3166 cpu = raw_smp_processor_id();
3167 data = tr->data[cpu];
dd0e545f 3168
3ea2e6d7 3169 if (unlikely(atomic_read(&data->disabled)))
dd0e545f
SR
3170 goto out;
3171
38697053 3172 spin_lock_irqsave(&trace_buf_lock, flags);
801fe400 3173 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
dd0e545f
SR
3174
3175 len = min(len, TRACE_BUF_SIZE-1);
3176 trace_buf[len] = 0;
3177
777e208d
SR
3178 size = sizeof(*entry) + len + 1;
3179 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3928a8a2
SR
3180 if (!event)
3181 goto out_unlock;
777e208d 3182 entry = ring_buffer_event_data(event);
38697053 3183 tracing_generic_entry_update(&entry->ent, flags, pc);
777e208d
SR
3184 entry->ent.type = TRACE_PRINT;
3185 entry->ip = ip;
dd0e545f 3186
777e208d
SR
3187 memcpy(&entry->buf, trace_buf, len);
3188 entry->buf[len] = 0;
3928a8a2 3189 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
dd0e545f 3190
3928a8a2 3191 out_unlock:
38697053 3192 spin_unlock_irqrestore(&trace_buf_lock, flags);
dd0e545f
SR
3193
3194 out:
38697053 3195 preempt_enable_notrace();
dd0e545f
SR
3196
3197 return len;
3198}
801fe400
PP
3199EXPORT_SYMBOL_GPL(trace_vprintk);
3200
3201int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3202{
3203 int ret;
3204 va_list ap;
3205
3206 if (!(trace_flags & TRACE_ITER_PRINTK))
3207 return 0;
3208
3209 va_start(ap, fmt);
3210 ret = trace_vprintk(ip, fmt, ap);
3211 va_end(ap);
3212 return ret;
3213}
dd0e545f
SR
3214EXPORT_SYMBOL_GPL(__ftrace_printk);
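
A hedged in-kernel usage sketch for the printk-style entry points above; output is gated on the TRACE_ITER_PRINTK flag, and the sketch calls __ftrace_printk() directly with _THIS_IP_ (from linux/kernel.h), which is what the ftrace_printk() convenience macro in linux/ftrace.h is expected to expand to. The caller and its argument are made up for illustration:

	#include <linux/ftrace.h>

	static void my_driver_poll(int queue)		/* hypothetical caller */
	{
		/* lands in the ring buffer as a TRACE_PRINT entry, rendered
		 * by the TRACE_PRINT cases in the formatters above */
		__ftrace_printk(_THIS_IP_, "polling queue %d\n", queue);
	}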
3215
3f5a54e3
SR
3216static int trace_panic_handler(struct notifier_block *this,
3217 unsigned long event, void *unused)
3218{
944ac425
SR
3219 if (ftrace_dump_on_oops)
3220 ftrace_dump();
3f5a54e3
SR
3221 return NOTIFY_OK;
3222}
3223
3224static struct notifier_block trace_panic_notifier = {
3225 .notifier_call = trace_panic_handler,
3226 .next = NULL,
3227 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3228};
3229
3230static int trace_die_handler(struct notifier_block *self,
3231 unsigned long val,
3232 void *data)
3233{
3234 switch (val) {
3235 case DIE_OOPS:
944ac425
SR
3236 if (ftrace_dump_on_oops)
3237 ftrace_dump();
3f5a54e3
SR
3238 break;
3239 default:
3240 break;
3241 }
3242 return NOTIFY_OK;
3243}
3244
3245static struct notifier_block trace_die_notifier = {
3246 .notifier_call = trace_die_handler,
3247 .priority = 200
3248};
3249
3250/*
3251 * printk is limited to a max of 1024; we really don't need it that big.
3252 * Nothing should be printing 1000 characters anyway.
3253 */
3254#define TRACE_MAX_PRINT 1000
3255
3256/*
3257 * Define here KERN_TRACE so that we have one place to modify
3258 * it if we decide to change what log level the ftrace dump
3259 * should be at.
3260 */
3261#define KERN_TRACE KERN_INFO
3262
3263static void
3264trace_printk_seq(struct trace_seq *s)
3265{
3266 /* Probably should print a warning here. */
3267 if (s->len >= 1000)
3268 s->len = 1000;
3269
3270 /* should be NUL-terminated, but we are paranoid. */
3271 s->buffer[s->len] = 0;
3272
3273 printk(KERN_TRACE "%s", s->buffer);
3274
3275 trace_seq_reset(s);
3276}
3277
3f5a54e3
SR
3278void ftrace_dump(void)
3279{
3280 static DEFINE_SPINLOCK(ftrace_dump_lock);
3281 /* use static because iter can be a bit big for the stack */
3282 static struct trace_iterator iter;
3f5a54e3
SR
3283 static cpumask_t mask;
3284 static int dump_ran;
d769041f
SR
3285 unsigned long flags;
3286 int cnt = 0, cpu;
3f5a54e3
SR
3287
3288 /* only one dump */
3289 spin_lock_irqsave(&ftrace_dump_lock, flags);
3290 if (dump_ran)
3291 goto out;
3292
3293 dump_ran = 1;
3294
3295 /* No turning back! */
81adbdc0 3296 ftrace_kill();
3f5a54e3 3297
d769041f
SR
3298 for_each_tracing_cpu(cpu) {
3299 atomic_inc(&global_trace.data[cpu]->disabled);
3300 }
3301
3f5a54e3
SR
3302 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3303
3304 iter.tr = &global_trace;
3305 iter.trace = current_trace;
3306
3307 /*
3308 * We need to stop all tracing on all CPUs to read
3309 * the next buffer. This is a bit expensive, but is
3310 * not done often. We fill in all that we can read,
3311 * and then release the locks again.
3312 */
3313
3314 cpus_clear(mask);
3315
3f5a54e3
SR
3316 while (!trace_empty(&iter)) {
3317
3318 if (!cnt)
3319 printk(KERN_TRACE "---------------------------------\n");
3320
3321 cnt++;
3322
3323 /* reset all but tr, trace, and overruns */
3324 memset(&iter.seq, 0,
3325 sizeof(struct trace_iterator) -
3326 offsetof(struct trace_iterator, seq));
3327 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3328 iter.pos = -1;
3329
3330 if (find_next_entry_inc(&iter) != NULL) {
3331 print_trace_line(&iter);
3332 trace_consume(&iter);
3333 }
3334
3335 trace_printk_seq(&iter.seq);
3336 }
3337
3338 if (!cnt)
3339 printk(KERN_TRACE " (ftrace buffer empty)\n");
3340 else
3341 printk(KERN_TRACE "---------------------------------\n");
3342
3f5a54e3
SR
3343 out:
3344 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3345}
3346
3928a8a2 3347__init static int tracer_alloc_buffers(void)
bc0c38d1 3348{
4c11d7ae 3349 struct trace_array_cpu *data;
4c11d7ae
SR
3350 int i;
3351
3928a8a2
SR
3352 /* TODO: make the number of buffers hot pluggable with CPUs */
3353 tracing_buffer_mask = cpu_possible_map;
4c11d7ae 3354
3928a8a2
SR
3355 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3356 TRACE_BUFFER_FLAGS);
3357 if (!global_trace.buffer) {
3358 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3359 WARN_ON(1);
3360 return 0;
4c11d7ae 3361 }
3928a8a2 3362 global_trace.entries = ring_buffer_size(global_trace.buffer);
4c11d7ae
SR
3363
3364#ifdef CONFIG_TRACER_MAX_TRACE
3928a8a2
SR
3365 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3366 TRACE_BUFFER_FLAGS);
3367 if (!max_tr.buffer) {
3368 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3369 WARN_ON(1);
3370 ring_buffer_free(global_trace.buffer);
3371 return 0;
4c11d7ae 3372 }
3928a8a2
SR
3373 max_tr.entries = ring_buffer_size(max_tr.buffer);
3374 WARN_ON(max_tr.entries != global_trace.entries);
a98a3c3f 3375#endif
ab46428c 3376
4c11d7ae 3377 /* Allocate the first page for all buffers */
ab46428c 3378 for_each_tracing_cpu(i) {
4c11d7ae 3379 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
bc0c38d1 3380 max_tr.data[i] = &per_cpu(max_data, i);
4c11d7ae 3381 }
bc0c38d1 3382
bc0c38d1
SR
3383 trace_init_cmdlines();
3384
43a15386 3385 register_tracer(&nop_trace);
b5ad384e
FW
3386#ifdef CONFIG_BOOT_TRACER
3387 register_tracer(&boot_tracer);
3388 current_trace = &boot_tracer;
3389 current_trace->init(&global_trace);
3390#else
43a15386 3391 current_trace = &nop_trace;
b5ad384e 3392#endif
bc0c38d1 3393
60a11774
SR
3394 /* All seems OK, enable tracing */
3395 tracing_disabled = 0;
3928a8a2 3396
3f5a54e3
SR
3397 atomic_notifier_chain_register(&panic_notifier_list,
3398 &trace_panic_notifier);
3399
3400 register_die_notifier(&trace_die_notifier);
3401
bc0c38d1 3402 return 0;
bc0c38d1 3403}
b5ad384e
FW
3404early_initcall(tracer_alloc_buffers);
3405fs_initcall(tracer_init_debugfs);