/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/sched.h>

#include <asm/ftrace.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before tracing fully stops.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

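/*
 * Illustrative sketch of a client of this list (not part of the
 * original file): tracers go through the public
 * register_ftrace_function() wrapper, which takes ftrace_lock and ends
 * up in __register_ftrace_function() above. The names my_trace_func
 * and my_ops below are hypothetical:
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		ip is the traced function, parent_ip its call site
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * With a single ops registered, ftrace_trace_function points straight
 * at my_trace_func; with more than one, it points at ftrace_list_func()
 * above, which walks the whole chain.
 */
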
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

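/*
 * Rough sizing of the above, assuming a 64-bit build with 4K pages:
 * struct ftrace_profile is two hlist pointers plus ip and counter
 * (32 bytes), or 40 bytes with the graph tracer's time field.
 * PROFILE_RECORDS_SIZE is then 4096 - 16 = 4080 bytes, giving on the
 * order of a hundred records per page (4080 / 40 = 102 with graph
 * timing, 4080 / 32 = 127 without).
 */
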
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	rec++;
	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               Hit    Time\n"
		      "  --------                               ---    ----\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

	pg = stat->start = stat->pages;

	/* allocate 10 more pages to start */
	for (i = 0; i < 10; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		/*
		 * We only care about allocating profile_pages; if we
		 * fail to allocate here, hopefully we will allocate
		 * later.
		 */
		if (!pg->next)
			break;
		pg = pg->next;
	}

	return 0;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate a few pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

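/*
 * Worked example of the bit computation above: with
 * FTRACE_PROFILE_HASH_SIZE == 1024, size-- yields 1023 (0x3ff) and the
 * shift loop runs ten times, so ftrace_profile_bits becomes 10. That
 * is exactly what hash_long(ip, 10) in ftrace_find_profiled_func()
 * needs to index 1024 buckets, and it is why the hash size must be a
 * power of two.
 */
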
static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/* Interrupts must be disabled when calling this */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat,
		     unsigned long ip, bool alloc_safe)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/* Try to always keep another page available */
	if (!stat->pages->next && alloc_safe)
		stat->pages->next = (void *)get_zeroed_page(GFP_ATOMIC);

	/*
	 * Try to find the function again since another
	 * task on another CPU could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

/*
 * If we are not in an interrupt or softirq, interrupts are disabled,
 * and preemption is not enabled (not in a spinlock), then it should be
 * safe to allocate memory.
 */
static bool ftrace_safe_to_allocate(void)
{
	return !in_interrupt() && irqs_disabled() && !preempt_count();
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;
	bool alloc_safe;

	if (!ftrace_profile_enabled)
		return;

	alloc_safe = ftrace_safe_to_allocate();

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip, alloc_safe);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}

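/*
 * A worked example of the subtime logic above, with GRAPH_TIME off:
 * suppose foo() runs 100us total and calls bar(), which runs 30us.
 * When bar() returns (depth 1), its 30us calltime is appended to
 * ret_stack[0].subtime. When foo() returns (depth 0), the accumulated
 * 30us is subtracted from foo()'s 100us calltime, so foo() is charged
 * 70us of its own time. With TRACE_ITER_GRAPH_TIME set, the branch is
 * skipped and foo() would be charged the full 100us including children.
 */
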
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func = function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

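/*
 * Typical usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * The write path above flips ftrace_profile_enabled under
 * ftrace_profile_lock; the per-cpu "function%d" stat files are
 * registered in ftrace_profile_debugfs() below.
 */
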
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * goes wrong we still do not free memory. Note
			 * that stat is per-cpu static data and must not
			 * be passed to kfree().
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops *ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for loop. Do not use 'break' to break out of the
 * loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

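/*
 * Usage sketch for the iteration macros above; because the body sits
 * inside two nested for loops, a 'break' would only leave the inner
 * one, hence the goto rule:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (some_condition(rec))
 *			goto out;	(NOT break)
 *	} while_for_each_ftrace_rec();
 *  out:
 *	...
 *
 * some_condition() is a placeholder here, not a real helper.
 */
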
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

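/*
 * Summary of the per-record decision logic above:
 *
 *   NOTRACE set, ENABLED set     -> disable (patch call site to nop)
 *   NOTRACE set, ENABLED clear   -> nothing
 *   filtering on and enabling:
 *     FILTER && ENABLED          -> nothing
 *     !FILTER && !ENABLED        -> nothing
 *     !FILTER && ENABLED         -> disable
 *     FILTER && !ENABLED         -> enable (patch nop to call)
 *   otherwise                    -> follow 'enable' if state differs
 *
 * The final patch direction depends only on the resulting ENABLED bit.
 */
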
static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have failed,
		 * and records that have not been converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

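/*
 * These open routines back the set_ftrace_filter and set_ftrace_notrace
 * debugfs files (wired up further down in this file). Typical usage,
 * assuming debugfs at /sys/kernel/debug/tracing:
 *
 *	# echo 'sys_open' > set_ftrace_filter	    trace only sys_open
 *	# echo 'hrtimer_*' >> set_ftrace_filter    append to the filter
 *	# echo '!hrtimer_*' >> set_ftrace_filter   drop matching entries
 *	# echo > set_ftrace_filter		    clear the filter
 *
 * Opening for write without O_APPEND is what triggers
 * ftrace_filter_reset() above.
 */
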
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *   0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

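/*
 * Examples of what ftrace_setup_glob() + ftrace_match() accept, given
 * the single-'*' globs supported here:
 *
 *	"schedule"	MATCH_FULL		exact name only
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match on "sched_"
 *	"*_lock"	MATCH_END_ONLY		suffix match on "_lock"
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match on "sched"
 *	"!sched_*"	as above, with *not set so callers clear the flag
 *
 * For MATCH_END_ONLY the search string arrives here as "_lock" with
 * len == 5, and the ptr[len] == 0 check pins the match to the end of
 * the symbol name.
 */
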
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'don't filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);

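/*
 * Example use of the mod command, assuming the standard debugfs mount
 * (the "func:cmd:param" splitting is done by the regex processing
 * later in this file):
 *
 *	# echo ':mod:ext3' > set_ftrace_filter	       all funcs in ext3
 *	# echo '*write*:mod:ext3' > set_ftrace_filter  subset by glob
 *
 * A blank func glob reaches ftrace_match_module_record() with
 * len == 0, which matches every function in the module.
 */
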
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func = function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}

2004
2005static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2006{
b6887d79
SR
2007 struct ftrace_func_probe *entry =
2008 container_of(rhp, struct ftrace_func_probe, rcu);
59df055f
SR
2009
2010 if (entry->ops->free)
2011 entry->ops->free(&entry->data);
2012 kfree(entry);
2013}


int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
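
/*
 * Example (editor's sketch, not part of the original file): how a caller
 * attaches a function probe.  The my_probe_* names are hypothetical; the
 * ops layout and the register call mirror the API above.
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs from the function tracer for every matched function */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	/* attach the probe to all functions matching "sched_*" */
	int ret = register_ftrace_function_probe("sched_*", &my_probe_ops,
						 NULL);
	/* on success the return value is the number of functions hooked */
	return ret < 0 ? ret : 0;
}
#endif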

enum {
	PROBE_TEST_FUNC = 1,
	PROBE_TEST_DATA = 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	/* a NULL, "*" or empty glob means: remove for all functions */
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, search, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
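
/*
 * Example (editor's sketch, not part of the original file): registering a
 * command usable in set_ftrace_filter writes.  The my_cmd names are
 * hypothetical; the handler signature matches the p->func() call in
 * ftrace_process_regex() below.
 */
#if 0
static int my_cmd_func(char *func, char *cmd, char *param, int enable)
{
	/* invoked for writes of the form "<glob>:my_cmd[:<param>]" */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "my_cmd",
	.func	= my_cmd_func,
};

static int __init my_cmd_init(void)
{
	/* fails with -EBUSY if the name is already taken */
	return register_ftrace_command(&my_cmd);
}
#endif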

static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
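
/*
 * Editor's note: the grammar accepted above is "<glob>[:<command>[:<param>]]".
 * A bare glob such as "sched_*" just updates the filter, while something
 * like "sched_*:traceon" dispatches to the handler of a command named
 * "traceon" (commands are registered elsewhere, e.g. by the function
 * tracer) with "sched_*" as @func and any trailing text as @param.
 */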

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* only skip leading white space if we are not continuing a token */
	if (!(iter->flags & FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and @reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all notrace filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and @reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
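
/*
 * Example (editor's sketch, not part of the original file): an in-kernel
 * user narrowing the trace before enabling it.  The globs are
 * illustrative; note the casts, since both helpers take unsigned char *.
 */
#if 0
static void __init my_filter_setup(void)
{
	/* trace only the scheduler ... */
	ftrace_set_filter((unsigned char *)"sched_*",
			  strlen("sched_*"), 1);
	/* ... but never this one function */
	ftrace_set_notrace((unsigned char *)"sched_clock",
			   strlen("sched_clock"), 1);
}
#endif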

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	/* Nothing registered; tell g_show to print "all functions enabled" */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}
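
/*
 * Editor's note: this is the write handler behind the set_graph_function
 * file created in ftrace_init_dyn_debugfs() below; e.g.
 * "echo __do_softirq > set_graph_function" ends up here and, via
 * ftrace_set_func(), records the matching ip in ftrace_graph_funcs[].
 */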

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architectures' linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
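
/*
 * Editor's note: this backs the set_ftrace_pid file created below.
 * Writing a pid limits function tracing to that task, "0" selects the
 * idle (swapper) tasks, and a negative value disables pid filtering.
 */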

static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: no locking or synchronization is done,
 * the tracing callbacks are simply redirected to the stub.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
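
/*
 * Example (editor's sketch, not part of the original file): a minimal
 * user of the two calls above.  The my_trace_* names are hypothetical;
 * note the callback is marked notrace, as the kernel-doc requires.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called for every function entry while registered */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}
#endif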

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (atomic_read(&ftrace_graph_active)) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
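
/*
 * Example (editor's sketch, not part of the original file): the callback
 * pair a graph tracer supplies.  The my_graph_* names are hypothetical;
 * the signatures follow the trace_func_graph_ent_t/ret_t types used
 * above.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* return non-zero to trace this function and its return */
	return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called when the traced function returns */
}

static int __init my_graph_init(void)
{
	/* only one graph tracer may be registered at a time (-EBUSY) */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif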

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif