bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III
13 */
14#include <linux/utsrelease.h>
15#include <linux/kallsyms.h>
16#include <linux/seq_file.h>
17#include <linux/debugfs.h>
4c11d7ae 18#include <linux/pagemap.h>
bc0c38d1
SR
19#include <linux/hardirq.h>
20#include <linux/linkage.h>
21#include <linux/uaccess.h>
22#include <linux/ftrace.h>
23#include <linux/module.h>
24#include <linux/percpu.h>
25#include <linux/ctype.h>
26#include <linux/init.h>
27#include <linux/gfp.h>
28#include <linux/fs.h>
29
30#include "trace.h"
31
32unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
33unsigned long __read_mostly tracing_thresh;
34
60a11774
SR
35static int tracing_disabled = 1;
36
bc0c38d1
SR
37static long notrace
38ns2usecs(cycle_t nsec)
39{
40 nsec += 500;
41 do_div(nsec, 1000);
42 return nsec;
43}
44
45static atomic_t tracer_counter;
46static struct trace_array global_trace;
47
48static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
49
50static struct trace_array max_tr;
51
52static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
53
54static int tracer_enabled;
4c11d7ae 55static unsigned long trace_nr_entries = 16384UL;
bc0c38d1
SR
56
57static struct tracer *trace_types __read_mostly;
58static struct tracer *current_trace __read_mostly;
59static int max_tracer_type_len;
60
61static DEFINE_MUTEX(trace_types_lock);
62
4c11d7ae
SR
63#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
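/*
 * Worked example (illustrative only; the real figures depend on the
 * architecture's PAGE_SIZE and on sizeof(struct trace_entry)): with
 * 4096-byte pages and a hypothetical 64-byte entry, each page holds
 * 4096 / 64 = 64 entries, so the default trace_nr_entries of 16384
 * above would need 256 pages per CPU buffer.
 */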
64
bc0c38d1
SR
65static int __init set_nr_entries(char *str)
66{
67 if (!str)
68 return 0;
69 trace_nr_entries = simple_strtoul(str, &str, 0);
70 return 1;
71}
72__setup("trace_entries=", set_nr_entries);
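/*
 * Usage sketch (assumes nothing beyond the __setup() hook above):
 * booting with e.g. "trace_entries=65536" on the kernel command line
 * overrides the default parsed by simple_strtoul(); the value is a
 * number of entries, which tracer_alloc_buffers() later rounds up to
 * whole pages per CPU.
 */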
73
57f50be1
SR
74unsigned long nsecs_to_usecs(unsigned long nsecs)
75{
76 return nsecs / 1000;
77}
78
bc0c38d1
SR
79enum trace_type {
80 __TRACE_FIRST_TYPE = 0,
81
82 TRACE_FN,
83 TRACE_CTX,
84
85 __TRACE_LAST_TYPE
86};
87
88enum trace_flag_type {
89 TRACE_FLAG_IRQS_OFF = 0x01,
90 TRACE_FLAG_NEED_RESCHED = 0x02,
91 TRACE_FLAG_HARDIRQ = 0x04,
92 TRACE_FLAG_SOFTIRQ = 0x08,
93};
94
95enum trace_iterator_flags {
96 TRACE_ITER_PRINT_PARENT = 0x01,
97 TRACE_ITER_SYM_OFFSET = 0x02,
98 TRACE_ITER_SYM_ADDR = 0x04,
99 TRACE_ITER_VERBOSE = 0x08,
100};
101
102#define TRACE_ITER_SYM_MASK \
103 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
104
 105/* These must match the bit positions above */
106static const char *trace_options[] = {
107 "print-parent",
108 "sym-offset",
109 "sym-addr",
110 "verbose",
111 NULL
112};
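/*
 * Illustrative use of these option names (the debugfs mount point is an
 * assumption; /debugfs/tracing is the path used in comments in this
 * file): each string maps to the flag bit of the same index and is
 * toggled through the "iter_ctrl" file created in tracer_init_debugfs(),
 * with a "no" prefix clearing the bit:
 *
 *	echo sym-offset > /debugfs/tracing/iter_ctrl
 *	echo noverbose  > /debugfs/tracing/iter_ctrl
 */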
113
114static unsigned trace_flags;
115
4c11d7ae 116static DEFINE_SPINLOCK(ftrace_max_lock);
bc0c38d1
SR
117
118/*
119 * Copy the new maximum trace into the separate maximum-trace
120 * structure. (this way the maximum trace is permanently saved,
121 * for later retrieval via /debugfs/tracing/latency_trace)
122 */
123static void notrace
124__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
125{
126 struct trace_array_cpu *data = tr->data[cpu];
127
128 max_tr.cpu = cpu;
129 max_tr.time_start = data->preempt_timestamp;
130
131 data = max_tr.data[cpu];
132 data->saved_latency = tracing_max_latency;
133
134 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
135 data->pid = tsk->pid;
136 data->uid = tsk->uid;
137 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
138 data->policy = tsk->policy;
139 data->rt_priority = tsk->rt_priority;
140
 141 /* record this task's comm */
142 tracing_record_cmdline(current);
143}
144
145notrace void
146update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
147{
148 struct trace_array_cpu *data;
149 void *save_trace;
4c11d7ae 150 struct list_head save_pages;
bc0c38d1
SR
151 int i;
152
4c11d7ae
SR
153 WARN_ON_ONCE(!irqs_disabled());
154 spin_lock(&ftrace_max_lock);
bc0c38d1
SR
155 /* clear out all the previous traces */
156 for_each_possible_cpu(i) {
157 data = tr->data[i];
158 save_trace = max_tr.data[i]->trace;
4c11d7ae 159 save_pages = max_tr.data[i]->trace_pages;
bc0c38d1
SR
160 memcpy(max_tr.data[i], data, sizeof(*data));
161 data->trace = save_trace;
4c11d7ae 162 data->trace_pages = save_pages;
89b2f978 163 tracing_reset(data);
bc0c38d1
SR
164 }
165
166 __update_max_tr(tr, tsk, cpu);
4c11d7ae 167 spin_unlock(&ftrace_max_lock);
bc0c38d1
SR
168}
169
170/**
171 * update_max_tr_single - only copy one trace over, and reset the rest
 172 * @tr: tracer
 173 * @tsk: task with the latency
 174 * @cpu: the cpu of the buffer to copy.
175 */
176notrace void
177update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
178{
179 struct trace_array_cpu *data = tr->data[cpu];
180 void *save_trace;
4c11d7ae 181 struct list_head save_pages;
bc0c38d1
SR
182 int i;
183
4c11d7ae
SR
184 WARN_ON_ONCE(!irqs_disabled());
185 spin_lock(&ftrace_max_lock);
bc0c38d1
SR
186 for_each_possible_cpu(i)
187 tracing_reset(max_tr.data[i]);
188
189 save_trace = max_tr.data[cpu]->trace;
4c11d7ae 190 save_pages = max_tr.data[cpu]->trace_pages;
bc0c38d1
SR
191 memcpy(max_tr.data[cpu], data, sizeof(*data));
192 data->trace = save_trace;
4c11d7ae 193 data->trace_pages = save_pages;
89b2f978 194 tracing_reset(data);
bc0c38d1
SR
195
196 __update_max_tr(tr, tsk, cpu);
4c11d7ae 197 spin_unlock(&ftrace_max_lock);
bc0c38d1
SR
198}
199
200int register_tracer(struct tracer *type)
201{
202 struct tracer *t;
203 int len;
204 int ret = 0;
205
206 if (!type->name) {
207 pr_info("Tracer must have a name\n");
208 return -1;
209 }
210
211 mutex_lock(&trace_types_lock);
212 for (t = trace_types; t; t = t->next) {
213 if (strcmp(type->name, t->name) == 0) {
214 /* already found */
215 pr_info("Trace %s already registered\n",
216 type->name);
217 ret = -1;
218 goto out;
219 }
220 }
221
60a11774
SR
222#ifdef CONFIG_FTRACE_STARTUP_TEST
223 if (type->selftest) {
224 struct tracer *saved_tracer = current_trace;
225 struct trace_array_cpu *data;
226 struct trace_array *tr = &global_trace;
227 int saved_ctrl = tr->ctrl;
228 int i;
229 /*
230 * Run a selftest on this tracer.
231 * Here we reset the trace buffer, and set the current
232 * tracer to be this tracer. The tracer can then run some
233 * internal tracing to verify that everything is in order.
234 * If we fail, we do not register this tracer.
235 */
 236 for_each_possible_cpu(i) {
 237 data = tr->data[i];
 238 if (!data->trace)
 239 continue;
 240 tracing_reset(data);
 241 }
242 current_trace = type;
243 tr->ctrl = 0;
244 /* the test is responsible for initializing and enabling */
245 pr_info("Testing tracer %s: ", type->name);
246 ret = type->selftest(type, tr);
247 /* the test is responsible for resetting too */
248 current_trace = saved_tracer;
249 tr->ctrl = saved_ctrl;
250 if (ret) {
251 printk(KERN_CONT "FAILED!\n");
252 goto out;
253 }
254 printk(KERN_CONT "PASSED\n");
255 }
256#endif
257
bc0c38d1
SR
258 type->next = trace_types;
259 trace_types = type;
260 len = strlen(type->name);
261 if (len > max_tracer_type_len)
262 max_tracer_type_len = len;
60a11774 263
bc0c38d1
SR
264 out:
265 mutex_unlock(&trace_types_lock);
266
267 return ret;
268}
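/*
 * Minimal registration sketch (illustrative, mirroring the "none"
 * tracer defined near the bottom of this file): only .name is
 * required, every other callback is checked before use.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *	};
 *
 *	register_tracer(&my_tracer);
 */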
269
270void unregister_tracer(struct tracer *type)
271{
272 struct tracer **t;
273 int len;
274
275 mutex_lock(&trace_types_lock);
276 for (t = &trace_types; *t; t = &(*t)->next) {
277 if (*t == type)
278 goto found;
279 }
280 pr_info("Trace %s not registered\n", type->name);
281 goto out;
282
283 found:
284 *t = (*t)->next;
285 if (strlen(type->name) != max_tracer_type_len)
286 goto out;
287
288 max_tracer_type_len = 0;
289 for (t = &trace_types; *t; t = &(*t)->next) {
290 len = strlen((*t)->name);
291 if (len > max_tracer_type_len)
292 max_tracer_type_len = len;
293 }
294 out:
295 mutex_unlock(&trace_types_lock);
296}
297
298void notrace tracing_reset(struct trace_array_cpu *data)
299{
300 data->trace_idx = 0;
4c11d7ae
SR
301 data->trace_current = data->trace;
302 data->trace_current_idx = 0;
bc0c38d1
SR
303}
304
305#ifdef CONFIG_FTRACE
306static void notrace
307function_trace_call(unsigned long ip, unsigned long parent_ip)
308{
309 struct trace_array *tr = &global_trace;
310 struct trace_array_cpu *data;
311 unsigned long flags;
312 long disabled;
313 int cpu;
314
315 if (unlikely(!tracer_enabled))
316 return;
317
18cef379 318 local_irq_save(flags);
bc0c38d1
SR
319 cpu = raw_smp_processor_id();
320 data = tr->data[cpu];
321 disabled = atomic_inc_return(&data->disabled);
322
323 if (likely(disabled == 1))
324 ftrace(tr, data, ip, parent_ip, flags);
325
326 atomic_dec(&data->disabled);
18cef379 327 local_irq_restore(flags);
bc0c38d1
SR
328}
329
330static struct ftrace_ops trace_ops __read_mostly =
331{
332 .func = function_trace_call,
333};
334#endif
335
336notrace void tracing_start_function_trace(void)
337{
338 register_ftrace_function(&trace_ops);
339}
340
341notrace void tracing_stop_function_trace(void)
342{
343 unregister_ftrace_function(&trace_ops);
344}
345
346#define SAVED_CMDLINES 128
347static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
348static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
349static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
350static int cmdline_idx;
351static DEFINE_SPINLOCK(trace_cmdline_lock);
352atomic_t trace_record_cmdline_disabled;
353
354static void trace_init_cmdlines(void)
355{
356 memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
357 memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
358 cmdline_idx = 0;
359}
360
361notrace void trace_stop_cmdline_recording(void);
362
363static void notrace trace_save_cmdline(struct task_struct *tsk)
364{
365 unsigned map;
366 unsigned idx;
367
368 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
369 return;
370
371 /*
372 * It's not the end of the world if we don't get
373 * the lock, but we also don't want to spin
374 * nor do we want to disable interrupts,
375 * so if we miss here, then better luck next time.
376 */
377 if (!spin_trylock(&trace_cmdline_lock))
378 return;
379
380 idx = map_pid_to_cmdline[tsk->pid];
381 if (idx >= SAVED_CMDLINES) {
382 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
383
384 map = map_cmdline_to_pid[idx];
385 if (map <= PID_MAX_DEFAULT)
386 map_pid_to_cmdline[map] = (unsigned)-1;
387
388 map_pid_to_cmdline[tsk->pid] = idx;
389
390 cmdline_idx = idx;
391 }
392
393 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
394
395 spin_unlock(&trace_cmdline_lock);
396}
397
398static notrace char *trace_find_cmdline(int pid)
399{
400 char *cmdline = "<...>";
401 unsigned map;
402
403 if (!pid)
404 return "<idle>";
405
406 if (pid > PID_MAX_DEFAULT)
407 goto out;
408
409 map = map_pid_to_cmdline[pid];
410 if (map >= SAVED_CMDLINES)
411 goto out;
412
413 cmdline = saved_cmdlines[map];
414
415 out:
416 return cmdline;
417}
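/*
 * Note on the lookup above: the pid -> comm cache only holds
 * SAVED_CMDLINES (128) entries and is filled opportunistically by
 * trace_save_cmdline(), so a task whose slot has been reused is
 * reported with the "<...>" placeholder instead of its name.
 */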
418
419notrace void tracing_record_cmdline(struct task_struct *tsk)
420{
421 if (atomic_read(&trace_record_cmdline_disabled))
422 return;
423
424 trace_save_cmdline(tsk);
425}
426
427static inline notrace struct trace_entry *
428tracing_get_trace_entry(struct trace_array *tr,
429 struct trace_array_cpu *data)
430{
431 unsigned long idx, idx_next;
432 struct trace_entry *entry;
4c11d7ae
SR
433 struct page *page;
434 struct list_head *next;
bc0c38d1 435
4c11d7ae
SR
436 data->trace_idx++;
437 idx = data->trace_current_idx;
bc0c38d1
SR
438 idx_next = idx + 1;
439
4c11d7ae
SR
440 entry = data->trace_current + idx * TRACE_ENTRY_SIZE;
441
442 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
443 page = virt_to_page(data->trace_current);
444 if (unlikely(&page->lru == data->trace_pages.prev))
445 next = data->trace_pages.next;
446 else
447 next = page->lru.next;
448 page = list_entry(next, struct page, lru);
449 data->trace_current = page_address(page);
bc0c38d1
SR
450 idx_next = 0;
451 }
452
4c11d7ae 453 data->trace_current_idx = idx_next;
bc0c38d1
SR
454
455 return entry;
456}
457
458static inline notrace void
459tracing_generic_entry_update(struct trace_entry *entry,
460 unsigned long flags)
461{
462 struct task_struct *tsk = current;
463 unsigned long pc;
464
465 pc = preempt_count();
466
467 entry->idx = atomic_inc_return(&tracer_counter);
468 entry->preempt_count = pc & 0xff;
469 entry->pid = tsk->pid;
470 entry->t = now(raw_smp_processor_id());
471 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
472 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
473 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
474 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
475}
476
477notrace void
478ftrace(struct trace_array *tr, struct trace_array_cpu *data,
479 unsigned long ip, unsigned long parent_ip,
480 unsigned long flags)
481{
482 struct trace_entry *entry;
483
484 entry = tracing_get_trace_entry(tr, data);
485 tracing_generic_entry_update(entry, flags);
486 entry->type = TRACE_FN;
487 entry->fn.ip = ip;
488 entry->fn.parent_ip = parent_ip;
489}
490
491notrace void
492tracing_sched_switch_trace(struct trace_array *tr,
493 struct trace_array_cpu *data,
494 struct task_struct *prev, struct task_struct *next,
495 unsigned long flags)
496{
497 struct trace_entry *entry;
498
499 entry = tracing_get_trace_entry(tr, data);
500 tracing_generic_entry_update(entry, flags);
501 entry->type = TRACE_CTX;
502 entry->ctx.prev_pid = prev->pid;
503 entry->ctx.prev_prio = prev->prio;
504 entry->ctx.prev_state = prev->state;
505 entry->ctx.next_pid = next->pid;
506 entry->ctx.next_prio = next->prio;
507}
508
509enum trace_file_type {
510 TRACE_FILE_LAT_FMT = 1,
511};
512
513static struct trace_entry *
4c11d7ae
SR
514trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
515 struct trace_iterator *iter, int cpu)
bc0c38d1 516{
4c11d7ae
SR
517 struct page *page;
518 struct trace_entry *array;
bc0c38d1 519
4c11d7ae
SR
520 if (iter->next_idx[cpu] >= tr->entries ||
521 iter->next_idx[cpu] >= data->trace_idx)
bc0c38d1
SR
522 return NULL;
523
4c11d7ae
SR
524 if (!iter->next_page[cpu]) {
525 /*
526 * Initialize. If the count of elements in
 527 * this buffer is greater than the max entries,
 528 * we had an overrun, which means we looped around.
529 * We can simply use the current pointer as our
530 * starting point.
531 */
532 if (data->trace_idx >= tr->entries) {
533 page = virt_to_page(data->trace_current);
534 iter->next_page[cpu] = &page->lru;
535 iter->next_page_idx[cpu] = data->trace_current_idx;
536 } else {
537 iter->next_page[cpu] = data->trace_pages.next;
538 iter->next_page_idx[cpu] = 0;
539 }
540 }
bc0c38d1 541
4c11d7ae
SR
542 page = list_entry(iter->next_page[cpu], struct page, lru);
543 array = page_address(page);
544
545 return &array[iter->next_page_idx[cpu]];
bc0c38d1
SR
546}
547
548static struct notrace trace_entry *
549find_next_entry(struct trace_iterator *iter, int *ent_cpu)
550{
551 struct trace_array *tr = iter->tr;
552 struct trace_entry *ent, *next = NULL;
553 int next_cpu = -1;
554 int cpu;
555
556 for_each_possible_cpu(cpu) {
557 if (!tr->data[cpu]->trace)
558 continue;
4c11d7ae 559 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
bc0c38d1
SR
560 if (ent &&
561 (!next || (long)(next->idx - ent->idx) > 0)) {
562 next = ent;
563 next_cpu = cpu;
564 }
565 }
566
567 if (ent_cpu)
568 *ent_cpu = next_cpu;
569
570 return next;
571}
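/*
 * Ordering note (descriptive, not part of the original comments): the
 * loop above merges the per-cpu buffers by returning the entry with
 * the lowest global ->idx; the signed (long)(next->idx - ent->idx) > 0
 * comparison keeps that ordering correct even after the atomic
 * tracer_counter wraps around.
 */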
572
573static void *find_next_entry_inc(struct trace_iterator *iter)
574{
575 struct trace_entry *next;
576 int next_cpu = -1;
577
578 next = find_next_entry(iter, &next_cpu);
579
580 if (next) {
bc0c38d1 581 iter->idx++;
4c11d7ae
SR
582 iter->next_idx[next_cpu]++;
583 iter->next_page_idx[next_cpu]++;
584 if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
585 struct trace_array_cpu *data = iter->tr->data[next_cpu];
586
587 iter->next_page_idx[next_cpu] = 0;
588 iter->next_page[next_cpu] =
589 iter->next_page[next_cpu]->next;
590 if (iter->next_page[next_cpu] == &data->trace_pages)
591 iter->next_page[next_cpu] =
592 data->trace_pages.next;
593 }
bc0c38d1
SR
594 }
595 iter->ent = next;
596 iter->cpu = next_cpu;
597
598 return next ? iter : NULL;
599}
600
601static void notrace *
602s_next(struct seq_file *m, void *v, loff_t *pos)
603{
604 struct trace_iterator *iter = m->private;
605 void *ent;
606 void *last_ent = iter->ent;
607 int i = (int)*pos;
608
609 (*pos)++;
610
611 /* can't go backwards */
612 if (iter->idx > i)
613 return NULL;
614
615 if (iter->idx < 0)
616 ent = find_next_entry_inc(iter);
617 else
618 ent = iter;
619
620 while (ent && iter->idx < i)
621 ent = find_next_entry_inc(iter);
622
623 iter->pos = *pos;
624
625 if (last_ent && !ent)
626 seq_puts(m, "\n\nvim:ft=help\n");
627
628 return ent;
629}
630
631static void *s_start(struct seq_file *m, loff_t *pos)
632{
633 struct trace_iterator *iter = m->private;
634 void *p = NULL;
635 loff_t l = 0;
636 int i;
637
638 mutex_lock(&trace_types_lock);
639
640 if (!current_trace || current_trace != iter->trace)
641 return NULL;
642
643 atomic_inc(&trace_record_cmdline_disabled);
644
645 /* let the tracer grab locks here if needed */
646 if (current_trace->start)
647 current_trace->start(iter);
648
649 if (*pos != iter->pos) {
650 iter->ent = NULL;
651 iter->cpu = 0;
652 iter->idx = -1;
653
4c11d7ae 654 for_each_possible_cpu(i) {
bc0c38d1 655 iter->next_idx[i] = 0;
4c11d7ae
SR
656 iter->next_page[i] = NULL;
657 }
bc0c38d1
SR
658
659 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
660 ;
661
662 } else {
4c11d7ae 663 l = *pos - 1;
bc0c38d1
SR
664 p = s_next(m, p, &l);
665 }
666
667 return p;
668}
669
670static void s_stop(struct seq_file *m, void *p)
671{
672 struct trace_iterator *iter = m->private;
673
674 atomic_dec(&trace_record_cmdline_disabled);
675
676 /* let the tracer release locks here if needed */
677 if (current_trace && current_trace == iter->trace && iter->trace->stop)
678 iter->trace->stop(iter);
679
680 mutex_unlock(&trace_types_lock);
681}
682
683static void
684seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
685{
686#ifdef CONFIG_KALLSYMS
687 char str[KSYM_SYMBOL_LEN];
688
689 kallsyms_lookup(address, NULL, NULL, NULL, str);
690
691 seq_printf(m, fmt, str);
692#endif
693}
694
695static void
696seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
697{
698#ifdef CONFIG_KALLSYMS
699 char str[KSYM_SYMBOL_LEN];
700
701 sprint_symbol(str, address);
702 seq_printf(m, fmt, str);
703#endif
704}
705
706#ifndef CONFIG_64BIT
707# define IP_FMT "%08lx"
708#else
709# define IP_FMT "%016lx"
710#endif
711
712static void notrace
713seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
714{
715 if (!ip) {
716 seq_printf(m, "0");
717 return;
718 }
719
720 if (sym_flags & TRACE_ITER_SYM_OFFSET)
721 seq_print_sym_offset(m, "%s", ip);
722 else
723 seq_print_sym_short(m, "%s", ip);
724
725 if (sym_flags & TRACE_ITER_SYM_ADDR)
726 seq_printf(m, " <" IP_FMT ">", ip);
727}
728
729static void notrace print_lat_help_header(struct seq_file *m)
730{
731 seq_puts(m, "# _------=> CPU# \n");
732 seq_puts(m, "# / _-----=> irqs-off \n");
733 seq_puts(m, "# | / _----=> need-resched \n");
734 seq_puts(m, "# || / _---=> hardirq/softirq \n");
735 seq_puts(m, "# ||| / _--=> preempt-depth \n");
736 seq_puts(m, "# |||| / \n");
737 seq_puts(m, "# ||||| delay \n");
738 seq_puts(m, "# cmd pid ||||| time | caller \n");
739 seq_puts(m, "# \\ / ||||| \\ | / \n");
740}
741
742static void notrace print_func_help_header(struct seq_file *m)
743{
744 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
745 seq_puts(m, "# | | | | |\n");
746}
747
748
749static void notrace
750print_trace_header(struct seq_file *m, struct trace_iterator *iter)
751{
752 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
753 struct trace_array *tr = iter->tr;
754 struct trace_array_cpu *data = tr->data[tr->cpu];
755 struct tracer *type = current_trace;
4c11d7ae
SR
756 unsigned long total = 0;
757 unsigned long entries = 0;
bc0c38d1
SR
758 int cpu;
759 const char *name = "preemption";
760
761 if (type)
762 name = type->name;
763
764 for_each_possible_cpu(cpu) {
765 if (tr->data[cpu]->trace) {
4c11d7ae
SR
766 total += tr->data[cpu]->trace_idx;
767 if (tr->data[cpu]->trace_idx > tr->entries)
bc0c38d1 768 entries += tr->entries;
4c11d7ae 769 else
bc0c38d1
SR
770 entries += tr->data[cpu]->trace_idx;
771 }
772 }
773
774 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
775 name, UTS_RELEASE);
776 seq_puts(m, "-----------------------------------"
777 "---------------------------------\n");
778 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
779 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 780 nsecs_to_usecs(data->saved_latency),
bc0c38d1 781 entries,
4c11d7ae 782 total,
bc0c38d1
SR
783 tr->cpu,
784#if defined(CONFIG_PREEMPT_NONE)
785 "server",
786#elif defined(CONFIG_PREEMPT_VOLUNTARY)
787 "desktop",
788#elif defined(CONFIG_PREEMPT_DESKTOP)
789 "preempt",
790#else
791 "unknown",
792#endif
793 /* These are reserved for later use */
794 0, 0, 0, 0);
795#ifdef CONFIG_SMP
796 seq_printf(m, " #P:%d)\n", num_online_cpus());
797#else
798 seq_puts(m, ")\n");
799#endif
800 seq_puts(m, " -----------------\n");
801 seq_printf(m, " | task: %.16s-%d "
802 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
803 data->comm, data->pid, data->uid, data->nice,
804 data->policy, data->rt_priority);
805 seq_puts(m, " -----------------\n");
806
807 if (data->critical_start) {
808 seq_puts(m, " => started at: ");
809 seq_print_ip_sym(m, data->critical_start, sym_flags);
810 seq_puts(m, "\n => ended at: ");
811 seq_print_ip_sym(m, data->critical_end, sym_flags);
812 seq_puts(m, "\n");
813 }
814
815 seq_puts(m, "\n");
816}
817
bc0c38d1
SR
818static void notrace
819lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
820{
821 int hardirq, softirq;
822 char *comm;
823
824 comm = trace_find_cmdline(entry->pid);
825
826 seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
827 seq_printf(m, "%d", cpu);
828 seq_printf(m, "%c%c",
829 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
830 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
831
832 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
833 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
834 if (hardirq && softirq)
835 seq_putc(m, 'H');
836 else {
837 if (hardirq)
838 seq_putc(m, 'h');
839 else {
840 if (softirq)
841 seq_putc(m, 's');
842 else
843 seq_putc(m, '.');
844 }
845 }
846
847 if (entry->preempt_count)
848 seq_printf(m, "%x", entry->preempt_count);
849 else
850 seq_puts(m, ".");
851}
852
853unsigned long preempt_mark_thresh = 100;
854
855static void notrace
856lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
857 unsigned long rel_usecs)
858{
859 seq_printf(m, " %4lldus", abs_usecs);
860 if (rel_usecs > preempt_mark_thresh)
861 seq_puts(m, "!: ");
862 else if (rel_usecs > 1)
863 seq_puts(m, "+: ");
864 else
865 seq_puts(m, " : ");
866}
867
868static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
869
870static void notrace
871print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
872 unsigned int trace_idx, int cpu)
873{
874 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
875 struct trace_entry *next_entry = find_next_entry(iter, NULL);
876 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
877 struct trace_entry *entry = iter->ent;
878 unsigned long abs_usecs;
879 unsigned long rel_usecs;
880 char *comm;
881 int S;
882
883 if (!next_entry)
884 next_entry = entry;
885 rel_usecs = ns2usecs(next_entry->t - entry->t);
886 abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
887
888 if (verbose) {
889 comm = trace_find_cmdline(entry->pid);
890 seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
891 " %ld.%03ldms (+%ld.%03ldms): ",
892 comm,
893 entry->pid, cpu, entry->flags,
894 entry->preempt_count, trace_idx,
895 ns2usecs(entry->t),
896 abs_usecs/1000,
897 abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
898 } else {
899 lat_print_generic(m, entry, cpu);
900 lat_print_timestamp(m, abs_usecs, rel_usecs);
901 }
902 switch (entry->type) {
903 case TRACE_FN:
904 seq_print_ip_sym(m, entry->fn.ip, sym_flags);
905 seq_puts(m, " (");
906 seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
907 seq_puts(m, ")\n");
908 break;
909 case TRACE_CTX:
910 S = entry->ctx.prev_state < sizeof(state_to_char) ?
911 state_to_char[entry->ctx.prev_state] : 'X';
912 comm = trace_find_cmdline(entry->ctx.next_pid);
913 seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
914 entry->ctx.prev_pid,
915 entry->ctx.prev_prio,
916 S,
917 entry->ctx.next_pid,
918 entry->ctx.next_prio,
919 comm);
920 break;
89b2f978
SR
921 default:
922 seq_printf(m, "Unknown type %d\n", entry->type);
bc0c38d1
SR
923 }
924}
925
926static void notrace
927print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
928{
929 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
930 struct trace_entry *entry = iter->ent;
931 unsigned long usec_rem;
932 unsigned long long t;
933 unsigned long secs;
934 char *comm;
935 int S;
936
937 comm = trace_find_cmdline(iter->ent->pid);
938
939 t = ns2usecs(entry->t);
940 usec_rem = do_div(t, 1000000ULL);
941 secs = (unsigned long)t;
942
943 seq_printf(m, "%16s-%-5d ", comm, entry->pid);
944 seq_printf(m, "[%02d] ", iter->cpu);
945 seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);
946
947 switch (entry->type) {
948 case TRACE_FN:
949 seq_print_ip_sym(m, entry->fn.ip, sym_flags);
950 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
951 entry->fn.parent_ip) {
952 seq_printf(m, " <-");
953 seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
954 }
955 break;
956 case TRACE_CTX:
957 S = entry->ctx.prev_state < sizeof(state_to_char) ?
958 state_to_char[entry->ctx.prev_state] : 'X';
959 seq_printf(m, " %d:%d:%c ==> %d:%d\n",
960 entry->ctx.prev_pid,
961 entry->ctx.prev_prio,
962 S,
963 entry->ctx.next_pid,
964 entry->ctx.next_prio);
965 break;
966 }
967 seq_printf(m, "\n");
968}
969
970static int trace_empty(struct trace_iterator *iter)
971{
972 struct trace_array_cpu *data;
973 int cpu;
974
975 for_each_possible_cpu(cpu) {
976 data = iter->tr->data[cpu];
977
978 if (data->trace &&
4c11d7ae 979 data->trace_idx)
bc0c38d1
SR
980 return 0;
981 }
982 return 1;
983}
984
985static int s_show(struct seq_file *m, void *v)
986{
987 struct trace_iterator *iter = v;
988
989 if (iter->ent == NULL) {
990 if (iter->tr) {
991 seq_printf(m, "# tracer: %s\n", iter->trace->name);
992 seq_puts(m, "#\n");
993 }
994 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
995 /* print nothing if the buffers are empty */
996 if (trace_empty(iter))
997 return 0;
998 print_trace_header(m, iter);
999 if (!(trace_flags & TRACE_ITER_VERBOSE))
1000 print_lat_help_header(m);
1001 } else {
1002 if (!(trace_flags & TRACE_ITER_VERBOSE))
1003 print_func_help_header(m);
1004 }
1005 } else {
1006 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
1007 print_lat_fmt(m, iter, iter->idx, iter->cpu);
1008 else
1009 print_trace_fmt(m, iter);
1010 }
1011
1012 return 0;
1013}
1014
1015static struct seq_operations tracer_seq_ops = {
1016 .start = s_start,
1017 .next = s_next,
1018 .stop = s_stop,
1019 .show = s_show,
1020};
1021
1022static struct trace_iterator notrace *
1023__tracing_open(struct inode *inode, struct file *file, int *ret)
1024{
1025 struct trace_iterator *iter;
1026
60a11774
SR
1027 if (tracing_disabled) {
1028 *ret = -ENODEV;
1029 return NULL;
1030 }
1031
bc0c38d1
SR
1032 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1033 if (!iter) {
1034 *ret = -ENOMEM;
1035 goto out;
1036 }
1037
1038 mutex_lock(&trace_types_lock);
1039 if (current_trace && current_trace->print_max)
1040 iter->tr = &max_tr;
1041 else
1042 iter->tr = inode->i_private;
1043 iter->trace = current_trace;
1044 iter->pos = -1;
1045
1046 /* TODO stop tracer */
1047 *ret = seq_open(file, &tracer_seq_ops);
1048 if (!*ret) {
1049 struct seq_file *m = file->private_data;
1050 m->private = iter;
1051
1052 /* stop the trace while dumping */
1053 if (iter->tr->ctrl)
1054 tracer_enabled = 0;
1055
1056 if (iter->trace && iter->trace->open)
1057 iter->trace->open(iter);
1058 } else {
1059 kfree(iter);
1060 iter = NULL;
1061 }
1062 mutex_unlock(&trace_types_lock);
1063
1064 out:
1065 return iter;
1066}
1067
1068int tracing_open_generic(struct inode *inode, struct file *filp)
1069{
60a11774
SR
1070 if (tracing_disabled)
1071 return -ENODEV;
1072
bc0c38d1
SR
1073 filp->private_data = inode->i_private;
1074 return 0;
1075}
1076
1077int tracing_release(struct inode *inode, struct file *file)
1078{
1079 struct seq_file *m = (struct seq_file *)file->private_data;
1080 struct trace_iterator *iter = m->private;
1081
1082 mutex_lock(&trace_types_lock);
1083 if (iter->trace && iter->trace->close)
1084 iter->trace->close(iter);
1085
1086 /* reenable tracing if it was previously enabled */
1087 if (iter->tr->ctrl)
1088 tracer_enabled = 1;
1089 mutex_unlock(&trace_types_lock);
1090
1091 seq_release(inode, file);
1092 kfree(iter);
1093 return 0;
1094}
1095
1096static int tracing_open(struct inode *inode, struct file *file)
1097{
1098 int ret;
1099
1100 __tracing_open(inode, file, &ret);
1101
1102 return ret;
1103}
1104
1105static int tracing_lt_open(struct inode *inode, struct file *file)
1106{
1107 struct trace_iterator *iter;
1108 int ret;
1109
1110 iter = __tracing_open(inode, file, &ret);
1111
1112 if (!ret)
1113 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1114
1115 return ret;
1116}
1117
1118
1119static void notrace *
1120t_next(struct seq_file *m, void *v, loff_t *pos)
1121{
1122 struct tracer *t = m->private;
1123
1124 (*pos)++;
1125
1126 if (t)
1127 t = t->next;
1128
1129 m->private = t;
1130
1131 return t;
1132}
1133
1134static void *t_start(struct seq_file *m, loff_t *pos)
1135{
1136 struct tracer *t = m->private;
1137 loff_t l = 0;
1138
1139 mutex_lock(&trace_types_lock);
1140 for (; t && l < *pos; t = t_next(m, t, &l))
1141 ;
1142
1143 return t;
1144}
1145
1146static void t_stop(struct seq_file *m, void *p)
1147{
1148 mutex_unlock(&trace_types_lock);
1149}
1150
1151static int t_show(struct seq_file *m, void *v)
1152{
1153 struct tracer *t = v;
1154
1155 if (!t)
1156 return 0;
1157
1158 seq_printf(m, "%s", t->name);
1159 if (t->next)
1160 seq_putc(m, ' ');
1161 else
1162 seq_putc(m, '\n');
1163
1164 return 0;
1165}
1166
1167static struct seq_operations show_traces_seq_ops = {
1168 .start = t_start,
1169 .next = t_next,
1170 .stop = t_stop,
1171 .show = t_show,
1172};
1173
1174static int show_traces_open(struct inode *inode, struct file *file)
1175{
1176 int ret;
1177
60a11774
SR
1178 if (tracing_disabled)
1179 return -ENODEV;
1180
bc0c38d1
SR
1181 ret = seq_open(file, &show_traces_seq_ops);
1182 if (!ret) {
1183 struct seq_file *m = file->private_data;
1184 m->private = trace_types;
1185 }
1186
1187 return ret;
1188}
1189
1190static struct file_operations tracing_fops = {
1191 .open = tracing_open,
1192 .read = seq_read,
1193 .llseek = seq_lseek,
1194 .release = tracing_release,
1195};
1196
1197static struct file_operations tracing_lt_fops = {
1198 .open = tracing_lt_open,
1199 .read = seq_read,
1200 .llseek = seq_lseek,
1201 .release = tracing_release,
1202};
1203
1204static struct file_operations show_traces_fops = {
1205 .open = show_traces_open,
1206 .read = seq_read,
1207 .release = seq_release,
1208};
1209
1210static ssize_t
1211tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
1212 size_t cnt, loff_t *ppos)
1213{
1214 char *buf;
1215 int r = 0;
1216 int len = 0;
1217 int i;
1218
 1219 /* calculate max size */
1220 for (i = 0; trace_options[i]; i++) {
1221 len += strlen(trace_options[i]);
1222 len += 3; /* "no" and space */
1223 }
1224
1225 /* +2 for \n and \0 */
1226 buf = kmalloc(len + 2, GFP_KERNEL);
1227 if (!buf)
1228 return -ENOMEM;
1229
1230 for (i = 0; trace_options[i]; i++) {
1231 if (trace_flags & (1 << i))
1232 r += sprintf(buf + r, "%s ", trace_options[i]);
1233 else
1234 r += sprintf(buf + r, "no%s ", trace_options[i]);
1235 }
1236
1237 r += sprintf(buf + r, "\n");
1238 WARN_ON(r >= len + 2);
1239
1240 r = simple_read_from_buffer(ubuf, cnt, ppos,
1241 buf, r);
1242
1243 kfree(buf);
1244
1245 return r;
1246}
1247
1248static ssize_t
1249tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
1250 size_t cnt, loff_t *ppos)
1251{
1252 char buf[64];
1253 char *cmp = buf;
1254 int neg = 0;
1255 int i;
1256
1257 if (cnt > 63)
1258 cnt = 63;
1259
1260 if (copy_from_user(&buf, ubuf, cnt))
1261 return -EFAULT;
1262
1263 buf[cnt] = 0;
1264
1265 if (strncmp(buf, "no", 2) == 0) {
1266 neg = 1;
1267 cmp += 2;
1268 }
1269
1270 for (i = 0; trace_options[i]; i++) {
1271 int len = strlen(trace_options[i]);
1272
1273 if (strncmp(cmp, trace_options[i], len) == 0) {
1274 if (neg)
1275 trace_flags &= ~(1 << i);
1276 else
1277 trace_flags |= (1 << i);
1278 break;
1279 }
1280 }
1281
1282 filp->f_pos += cnt;
1283
1284 return cnt;
1285}
1286
1287static struct file_operations tracing_iter_fops = {
1288 .open = tracing_open_generic,
1289 .read = tracing_iter_ctrl_read,
1290 .write = tracing_iter_ctrl_write,
1291};
1292
1293static ssize_t
1294tracing_ctrl_read(struct file *filp, char __user *ubuf,
1295 size_t cnt, loff_t *ppos)
1296{
1297 struct trace_array *tr = filp->private_data;
1298 char buf[64];
1299 int r;
1300
1301 r = sprintf(buf, "%ld\n", tr->ctrl);
1302 return simple_read_from_buffer(ubuf, cnt, ppos,
1303 buf, r);
1304}
1305
1306static ssize_t
1307tracing_ctrl_write(struct file *filp, const char __user *ubuf,
1308 size_t cnt, loff_t *ppos)
1309{
1310 struct trace_array *tr = filp->private_data;
1311 long val;
1312 char buf[64];
1313
1314 if (cnt > 63)
1315 cnt = 63;
1316
1317 if (copy_from_user(&buf, ubuf, cnt))
1318 return -EFAULT;
1319
1320 buf[cnt] = 0;
1321
1322 val = simple_strtoul(buf, NULL, 10);
1323
1324 val = !!val;
1325
1326 mutex_lock(&trace_types_lock);
1327 if (tr->ctrl ^ val) {
1328 if (val)
1329 tracer_enabled = 1;
1330 else
1331 tracer_enabled = 0;
1332
1333 tr->ctrl = val;
1334
1335 if (current_trace && current_trace->ctrl_update)
1336 current_trace->ctrl_update(tr);
1337 }
1338 mutex_unlock(&trace_types_lock);
1339
1340 filp->f_pos += cnt;
1341
1342 return cnt;
1343}
1344
1345static ssize_t
1346tracing_set_trace_read(struct file *filp, char __user *ubuf,
1347 size_t cnt, loff_t *ppos)
1348{
1349 char buf[max_tracer_type_len+2];
1350 int r;
1351
1352 mutex_lock(&trace_types_lock);
1353 if (current_trace)
1354 r = sprintf(buf, "%s\n", current_trace->name);
1355 else
1356 r = sprintf(buf, "\n");
1357 mutex_unlock(&trace_types_lock);
1358
1359 return simple_read_from_buffer(ubuf, cnt, ppos,
1360 buf, r);
1361}
1362
1363static ssize_t
1364tracing_set_trace_write(struct file *filp, const char __user *ubuf,
1365 size_t cnt, loff_t *ppos)
1366{
1367 struct trace_array *tr = &global_trace;
1368 struct tracer *t;
1369 char buf[max_tracer_type_len+1];
1370 int i;
1371
1372 if (cnt > max_tracer_type_len)
1373 cnt = max_tracer_type_len;
1374
1375 if (copy_from_user(&buf, ubuf, cnt))
1376 return -EFAULT;
1377
1378 buf[cnt] = 0;
1379
1380 /* strip ending whitespace. */
1381 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
1382 buf[i] = 0;
1383
1384 mutex_lock(&trace_types_lock);
1385 for (t = trace_types; t; t = t->next) {
1386 if (strcmp(t->name, buf) == 0)
1387 break;
1388 }
1389 if (!t || t == current_trace)
1390 goto out;
1391
1392 if (current_trace && current_trace->reset)
1393 current_trace->reset(tr);
1394
1395 current_trace = t;
1396 if (t->init)
1397 t->init(tr);
1398
1399 out:
1400 mutex_unlock(&trace_types_lock);
1401
1402 filp->f_pos += cnt;
1403
1404 return cnt;
1405}
1406
1407static ssize_t
1408tracing_max_lat_read(struct file *filp, char __user *ubuf,
1409 size_t cnt, loff_t *ppos)
1410{
1411 unsigned long *ptr = filp->private_data;
1412 char buf[64];
1413 int r;
1414
1415 r = snprintf(buf, 64, "%ld\n",
1416 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
1417 if (r > 64)
1418 r = 64;
1419 return simple_read_from_buffer(ubuf, cnt, ppos,
1420 buf, r);
1421}
1422
1423static ssize_t
1424tracing_max_lat_write(struct file *filp, const char __user *ubuf,
1425 size_t cnt, loff_t *ppos)
1426{
1427 long *ptr = filp->private_data;
1428 long val;
1429 char buf[64];
1430
1431 if (cnt > 63)
1432 cnt = 63;
1433
1434 if (copy_from_user(&buf, ubuf, cnt))
1435 return -EFAULT;
1436
1437 buf[cnt] = 0;
1438
1439 val = simple_strtoul(buf, NULL, 10);
1440
1441 *ptr = val * 1000;
1442
1443 return cnt;
1444}
1445
1446static struct file_operations tracing_max_lat_fops = {
1447 .open = tracing_open_generic,
1448 .read = tracing_max_lat_read,
1449 .write = tracing_max_lat_write,
1450};
1451
1452static struct file_operations tracing_ctrl_fops = {
1453 .open = tracing_open_generic,
1454 .read = tracing_ctrl_read,
1455 .write = tracing_ctrl_write,
1456};
1457
1458static struct file_operations set_tracer_fops = {
1459 .open = tracing_open_generic,
1460 .read = tracing_set_trace_read,
1461 .write = tracing_set_trace_write,
1462};
1463
1464#ifdef CONFIG_DYNAMIC_FTRACE
1465
1466static ssize_t
1467tracing_read_long(struct file *filp, char __user *ubuf,
1468 size_t cnt, loff_t *ppos)
1469{
1470 unsigned long *p = filp->private_data;
1471 char buf[64];
1472 int r;
1473
1474 r = sprintf(buf, "%ld\n", *p);
1475 return simple_read_from_buffer(ubuf, cnt, ppos,
1476 buf, r);
1477}
1478
1479static struct file_operations tracing_read_long_fops = {
1480 .open = tracing_open_generic,
1481 .read = tracing_read_long,
1482};
1483#endif
1484
1485static struct dentry *d_tracer;
1486
1487struct dentry *tracing_init_dentry(void)
1488{
1489 static int once;
1490
1491 if (d_tracer)
1492 return d_tracer;
1493
1494 d_tracer = debugfs_create_dir("tracing", NULL);
1495
1496 if (!d_tracer && !once) {
1497 once = 1;
1498 pr_warning("Could not create debugfs directory 'tracing'\n");
1499 return NULL;
1500 }
1501
1502 return d_tracer;
1503}
1504
60a11774
SR
1505#ifdef CONFIG_FTRACE_SELFTEST
1506/* Let selftest have access to static functions in this file */
1507#include "trace_selftest.c"
1508#endif
1509
bc0c38d1
SR
1510static __init void tracer_init_debugfs(void)
1511{
1512 struct dentry *d_tracer;
1513 struct dentry *entry;
1514
1515 d_tracer = tracing_init_dentry();
1516
1517 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
1518 &global_trace, &tracing_ctrl_fops);
1519 if (!entry)
1520 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
1521
1522 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
1523 NULL, &tracing_iter_fops);
1524 if (!entry)
1525 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
1526
1527 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
1528 &global_trace, &tracing_lt_fops);
1529 if (!entry)
1530 pr_warning("Could not create debugfs 'latency_trace' entry\n");
1531
1532 entry = debugfs_create_file("trace", 0444, d_tracer,
1533 &global_trace, &tracing_fops);
1534 if (!entry)
1535 pr_warning("Could not create debugfs 'trace' entry\n");
1536
1537 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
1538 &global_trace, &show_traces_fops);
1539 if (!entry)
 1540 pr_warning("Could not create debugfs 'available_tracers' entry\n");
1541
1542 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
1543 &global_trace, &set_tracer_fops);
1544 if (!entry)
 1545 pr_warning("Could not create debugfs 'current_tracer' entry\n");
1546
1547 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
1548 &tracing_max_latency,
1549 &tracing_max_lat_fops);
1550 if (!entry)
1551 pr_warning("Could not create debugfs "
1552 "'tracing_max_latency' entry\n");
1553
1554 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
1555 &tracing_thresh, &tracing_max_lat_fops);
1556 if (!entry)
1557 pr_warning("Could not create debugfs "
1558 "'tracing_threash' entry\n");
1559
1560#ifdef CONFIG_DYNAMIC_FTRACE
1561 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
1562 &ftrace_update_tot_cnt,
1563 &tracing_read_long_fops);
1564 if (!entry)
1565 pr_warning("Could not create debugfs "
1566 "'dyn_ftrace_total_info' entry\n");
1567#endif
1568}
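/*
 * Example session against the files created above (assumes debugfs is
 * mounted at /debugfs as in the comments in this file; adjust the path
 * for your mount point):
 *
 *	cat /debugfs/tracing/available_tracers
 *	echo <one of the listed tracers> > /debugfs/tracing/current_tracer
 *	echo 1 > /debugfs/tracing/tracing_enabled
 *	cat /debugfs/tracing/trace
 */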
1569
 1570/* dummy tracer to disable tracing */
1571static struct tracer no_tracer __read_mostly =
1572{
1573 .name = "none",
1574};
1575
4c11d7ae 1576static int trace_alloc_page(void)
bc0c38d1 1577{
4c11d7ae
SR
1578 struct trace_array_cpu *data;
1579 void *array;
1580 struct page *page, *tmp;
1581 LIST_HEAD(pages);
1582 int i;
1583
1584 /* first allocate a page for each CPU */
1585 for_each_possible_cpu(i) {
1586 array = (void *)__get_free_page(GFP_KERNEL);
1587 if (array == NULL) {
 1588 printk(KERN_ERR "tracer: failed to allocate page "
1589 "for trace buffer!\n");
1590 goto free_pages;
1591 }
1592
1593 page = virt_to_page(array);
1594 list_add(&page->lru, &pages);
1595
1596/* Only allocate if we are actually using the max trace */
1597#ifdef CONFIG_TRACER_MAX_TRACE
1598 array = (void *)__get_free_page(GFP_KERNEL);
1599 if (array == NULL) {
 1600 printk(KERN_ERR "tracer: failed to allocate page "
1601 "for trace buffer!\n");
1602 goto free_pages;
1603 }
1604 page = virt_to_page(array);
1605 list_add(&page->lru, &pages);
1606#endif
1607 }
1608
 1609 /* Now that we successfully allocated a page per CPU, add them */
1610 for_each_possible_cpu(i) {
1611 data = global_trace.data[i];
1612 page = list_entry(pages.next, struct page, lru);
1613 list_del(&page->lru);
1614 list_add_tail(&page->lru, &data->trace_pages);
1615 ClearPageLRU(page);
1616
1617#ifdef CONFIG_TRACER_MAX_TRACE
1618 data = max_tr.data[i];
1619 page = list_entry(pages.next, struct page, lru);
1620 list_del(&page->lru);
1621 list_add_tail(&page->lru, &data->trace_pages);
1622 SetPageLRU(page);
1623#endif
1624 }
1625 global_trace.entries += ENTRIES_PER_PAGE;
1626
1627 return 0;
1628
1629 free_pages:
1630 list_for_each_entry_safe(page, tmp, &pages, lru) {
1631 list_del(&page->lru);
1632 __free_page(page);
1633 }
1634 return -ENOMEM;
bc0c38d1
SR
1635}
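/*
 * Sizing sketch (descriptive): each successful call above links one
 * more page into every CPU's trace_pages list (plus one per CPU for
 * max_tr when CONFIG_TRACER_MAX_TRACE is set) and grows
 * global_trace.entries by ENTRIES_PER_PAGE; tracer_alloc_buffers()
 * below keeps calling it until trace_nr_entries is covered.
 */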
1636
1637__init static int tracer_alloc_buffers(void)
1638{
4c11d7ae
SR
1639 struct trace_array_cpu *data;
1640 void *array;
1641 struct page *page;
1642 int pages = 0;
60a11774 1643 int ret = -ENOMEM;
bc0c38d1
SR
1644 int i;
1645
4c11d7ae 1646 /* Allocate the first page for all buffers */
bc0c38d1 1647 for_each_possible_cpu(i) {
4c11d7ae 1648 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
bc0c38d1
SR
1649 max_tr.data[i] = &per_cpu(max_data, i);
1650
4c11d7ae 1651 array = (void *)__get_free_page(GFP_KERNEL);
bc0c38d1 1652 if (array == NULL) {
4c11d7ae
SR
 1653 printk(KERN_ERR "tracer: failed to allocate page "
1654 "for trace buffer!\n");
bc0c38d1
SR
1655 goto free_buffers;
1656 }
4c11d7ae
SR
1657 data->trace = array;
1658
 1659 /* add the page backing this array to the cpu's page list */
1660 INIT_LIST_HEAD(&data->trace_pages);
1661 page = virt_to_page(array);
1662 list_add(&page->lru, &data->trace_pages);
1663 /* use the LRU flag to differentiate the two buffers */
1664 ClearPageLRU(page);
bc0c38d1
SR
1665
1666/* Only allocate if we are actually using the max trace */
1667#ifdef CONFIG_TRACER_MAX_TRACE
4c11d7ae 1668 array = (void *)__get_free_page(GFP_KERNEL);
bc0c38d1 1669 if (array == NULL) {
4c11d7ae
SR
 1670 printk(KERN_ERR "tracer: failed to allocate page "
1671 "for trace buffer!\n");
bc0c38d1
SR
1672 goto free_buffers;
1673 }
1674 max_tr.data[i]->trace = array;
4c11d7ae
SR
1675
1676 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
1677 page = virt_to_page(array);
1678 list_add(&page->lru, &max_tr.data[i]->trace_pages);
1679 SetPageLRU(page);
bc0c38d1
SR
1680#endif
1681 }
1682
1683 /*
1684 * Since we allocate by orders of pages, we may be able to
1685 * round up a bit.
1686 */
4c11d7ae 1687 global_trace.entries = ENTRIES_PER_PAGE;
4c11d7ae
SR
1688 pages++;
1689
1690 while (global_trace.entries < trace_nr_entries) {
1691 if (trace_alloc_page())
1692 break;
1693 pages++;
1694 }
89b2f978 1695 max_tr.entries = global_trace.entries;
bc0c38d1 1696
4c11d7ae
SR
1697 pr_info("tracer: %d pages allocated for %ld",
1698 pages, trace_nr_entries);
bc0c38d1
SR
1699 pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
1700 pr_info(" actual entries %ld\n", global_trace.entries);
1701
1702 tracer_init_debugfs();
1703
1704 trace_init_cmdlines();
1705
1706 register_tracer(&no_tracer);
1707 current_trace = &no_tracer;
1708
60a11774
SR
1709 /* All seems OK, enable tracing */
1710 tracing_disabled = 0;
1711
bc0c38d1
SR
1712 return 0;
1713
1714 free_buffers:
1715 for (i-- ; i >= 0; i--) {
4c11d7ae 1716 struct page *page, *tmp;
bc0c38d1
SR
1717 struct trace_array_cpu *data = global_trace.data[i];
1718
1719 if (data && data->trace) {
4c11d7ae
SR
1720 list_for_each_entry_safe(page, tmp,
1721 &data->trace_pages, lru) {
1722 list_del(&page->lru);
1723 __free_page(page);
1724 }
bc0c38d1
SR
1725 data->trace = NULL;
1726 }
1727
1728#ifdef CONFIG_TRACER_MAX_TRACE
1729 data = max_tr.data[i];
1730 if (data && data->trace) {
4c11d7ae
SR
1731 list_for_each_entry_safe(page, tmp,
1732 &data->trace_pages, lru) {
1733 list_del(&page->lru);
1734 __free_page(page);
1735 }
bc0c38d1
SR
1736 data->trace = NULL;
1737 }
1738#endif
1739 }
60a11774 1740 return ret;
bc0c38d1
SR
1741}
1742
60a11774 1743fs_initcall(tracer_alloc_buffers);