kernel/trace/ftrace.c, blame view at commit "ftrace: Remove record freezing" (mirror_ubuntu-bionic-kernel.git)
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

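/*
 * Any anomaly caught by these macros permanently disables ftrace:
 * ftrace_kill() stops all tracing and prevents it from being
 * re-enabled, since continuing to patch code after an inconsistency
 * has been detected could crash the kernel.
 */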
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
#endif

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the moment
 * the trace function stops being called.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (!list_empty(&ftrace_pids)) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	func = ftrace_trace_function;
#else
	func = __ftrace_trace_function;
#endif

	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

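/*
 * Back-of-the-envelope sizing (illustrative, not from the source):
 * on a 64-bit build with 4K pages and CONFIG_FUNCTION_GRAPH_TRACER,
 * sizeof(struct ftrace_profile) is roughly 40 bytes (16 for the
 * hlist_node, 8 each for ip, counter and time), so one page holds
 * on the order of a hundred records.
 */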
static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* when function graph is not configured, compare on hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg\n"
		      "  --------                               "
		   "---    ----            ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static DEFINE_MUTEX(mutex);
	static struct trace_seq s;
	unsigned long long avg;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	free_page((unsigned long)stat->pages);
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

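/*
 * Two flavors of the profiler: when the function graph tracer is
 * available it is used, so that per-function time can be recorded
 * as well as hit counts; otherwise a plain ftrace_ops callback only
 * counts hits.
 */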
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * goes wrong, we still do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
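
/*
 * Typical use from userspace (illustrative; assumes debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 */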

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for loop. Do not use 'break' to break out of
 * the loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
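
/*
 * Illustrative use of the iteration macros (a sketch, not from the
 * source; target_ip is hypothetical): leaving the walk early
 * requires a goto, e.g.
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */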

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();
	return 0;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced or we want to disable it,
	 * then disable it.
	 *
	 * If we want to enable it and filtering is off, then enable it.
	 *
	 * If we want to enable it and filtering is on, enable it only if
	 * it's filtered
	 */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have failed,
		 * and records that have not been converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

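/*
 * ftrace_start_up acts as a reference count: each ftrace_startup()
 * call increments it and each ftrace_shutdown() decrements it, so
 * the call sites are only patched back to NOPs when the last user
 * goes away.
 */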
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/*
		 * Do the initial record conversion from the mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			continue;
		}

		p->flags |= FTRACE_FL_CONVERTED;
		ftrace_update_cnt++;

		/*
		 * If the tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediately is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions. Making first the NOP
		 * conversion puts the module to the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed) {
				ftrace_bug(failed, p->ip);
				ftrace_free_rec(p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_FAILURES	= (1 << 2),
	FTRACE_ITER_PRINTALL	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	struct trace_parser	parser;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_HASH))
		*pos = 0;

	iter->flags |= FTRACE_ITER_HASH;

	iter->hidx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_hash_next(m, p, &l);
		if (!p)
			break;
	}
	return p;
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p && iter->flags & FTRACE_ITER_FILTER)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	seq_printf(m, "%ps\n", (void *)rec->ip);

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
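
/*
 * Note on the userspace semantics (illustrative): opening
 * set_ftrace_filter or set_ftrace_notrace with O_TRUNC (the shell's
 * '>' redirection) resets the current filter before the new entries
 * are written, while appending ('>>') keeps the existing entries.
 */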

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
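
/*
 * How the match types map to user globs (a sketch of the behaviour
 * of filter_parse_regex(), which lives outside this file):
 *
 *	"func"    -> MATCH_FULL         (exact match)
 *	"func*"   -> MATCH_FRONT_ONLY   (prefix match)
 *	"*func"   -> MATCH_END_ONLY     (suffix match)
 *	"*func*"  -> MATCH_MIDDLE_ONLY  (substring match)
 */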

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static int ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = filter_parse_regex(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static int ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = filter_parse_regex(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	if (ftrace_match_module_records(func, mod, enable))
		return 0;
	return -EINVAL;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
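
/*
 * Example use of the mod command from userspace (illustrative;
 * assumes debugfs is mounted at /sys/kernel/debug). This filters on
 * every traceable function provided by the ext4 module:
 *
 *	echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 */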

static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

1884
b6887d79 1885static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 1886{
fb9fb015 1887 .func = function_trace_probe_call,
59df055f
SR
1888};
1889
b6887d79 1890static int ftrace_probe_registered;
59df055f 1891
b6887d79 1892static void __enable_ftrace_function_probe(void)
59df055f
SR
1893{
1894 int i;
1895
b6887d79 1896 if (ftrace_probe_registered)
59df055f
SR
1897 return;
1898
1899 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1900 struct hlist_head *hhd = &ftrace_func_hash[i];
1901 if (hhd->first)
1902 break;
1903 }
1904 /* Nothing registered? */
1905 if (i == FTRACE_FUNC_HASHSIZE)
1906 return;
1907
b6887d79 1908 __register_ftrace_function(&trace_probe_ops);
59df055f 1909 ftrace_startup(0);
b6887d79 1910 ftrace_probe_registered = 1;
59df055f
SR
1911}
1912
b6887d79 1913static void __disable_ftrace_function_probe(void)
59df055f
SR
1914{
1915 int i;
1916
b6887d79 1917 if (!ftrace_probe_registered)
59df055f
SR
1918 return;
1919
1920 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1921 struct hlist_head *hhd = &ftrace_func_hash[i];
1922 if (hhd->first)
1923 return;
1924 }
1925
1926 /* no more funcs left */
b6887d79 1927 __unregister_ftrace_function(&trace_probe_ops);
59df055f 1928 ftrace_shutdown(0);
b6887d79 1929 ftrace_probe_registered = 0;
59df055f
SR
1930}
1931

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
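
/*
 * Sketch of a caller (illustrative only; my_probe and my_probe_ops
 * are hypothetical, not part of this file). The probe fires every
 * time a function matching the glob is hit:
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		... keep this light, it runs on every matching call ...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe,
 *	};
 *
 *	register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
 */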

enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

2069
2070void
b6887d79 2071unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2072 void *data)
2073{
b6887d79
SR
2074 __unregister_ftrace_function_probe(glob, ops, data,
2075 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
2076}
2077
2078void
b6887d79 2079unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 2080{
b6887d79 2081 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
2082}
2083
b6887d79 2084void unregister_ftrace_function_probe_all(char *glob)
59df055f 2085{
b6887d79 2086 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
2087}
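
/*
 * The three unregister flavors above differ only in match precision.
 * Illustrative calls (my_probe_ops and my_data are hypothetical):
 */
#if 0 /* example only */
static void my_remove_probes(void)
{
	/* remove entries matching glob, ops and data */
	unregister_ftrace_function_probe("vfs_*", &my_probe_ops, my_data);
	/* remove entries matching glob and ops, regardless of data */
	unregister_ftrace_function_probe_func("vfs_*", &my_probe_ops);
	/* remove every registered probe matching the glob */
	unregister_ftrace_function_probe_all("*");
}
#endif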
2088
f6180773
SR
2089static LIST_HEAD(ftrace_commands);
2090static DEFINE_MUTEX(ftrace_cmd_mutex);
2091
2092int register_ftrace_command(struct ftrace_func_command *cmd)
2093{
2094 struct ftrace_func_command *p;
2095 int ret = 0;
2096
2097 mutex_lock(&ftrace_cmd_mutex);
2098 list_for_each_entry(p, &ftrace_commands, list) {
2099 if (strcmp(cmd->name, p->name) == 0) {
2100 ret = -EBUSY;
2101 goto out_unlock;
2102 }
2103 }
2104 list_add(&cmd->list, &ftrace_commands);
2105 out_unlock:
2106 mutex_unlock(&ftrace_cmd_mutex);
2107
2108 return ret;
2109}
2110
2111int unregister_ftrace_command(struct ftrace_func_command *cmd)
2112{
2113 struct ftrace_func_command *p, *n;
2114 int ret = -ENODEV;
2115
2116 mutex_lock(&ftrace_cmd_mutex);
2117 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2118 if (strcmp(cmd->name, p->name) == 0) {
2119 ret = 0;
2120 list_del_init(&p->list);
2121 goto out_unlock;
2122 }
2123 }
2124 out_unlock:
2125 mutex_unlock(&ftrace_cmd_mutex);
2126
2127 return ret;
2128}
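
/*
 * Sketch of a hypothetical command hooked into filter-file parsing
 * (names are illustrative). The .func signature matches the
 * p->func() call in ftrace_process_regex() below:
 */
#if 0 /* example only */
static int my_cmd_func(char *func, char *cmd, char *param, int enable)
{
	/* invoked for "func:my_cmd[:param]" written to set_ftrace_filter */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name = "my_cmd",
	.func = my_cmd_func,
};

/* register_ftrace_command(&my_cmd) returns -EBUSY on a name clash */
#endif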
2129
64e7c440
SR
2130static int ftrace_process_regex(char *buff, int len, int enable)
2131{
f6180773 2132 char *func, *command, *next = buff;
6a24a244 2133 struct ftrace_func_command *p;
f6180773 2134 int ret = -EINVAL;
64e7c440
SR
2135
2136 func = strsep(&next, ":");
2137
2138 if (!next) {
311d16da
LZ
2139 if (ftrace_match_records(func, len, enable))
2140 return 0;
2141 return ret;
64e7c440
SR
2142 }
2143
f6180773 2144 /* command found */
64e7c440
SR
2145
2146 command = strsep(&next, ":");
2147
f6180773
SR
2148 mutex_lock(&ftrace_cmd_mutex);
2149 list_for_each_entry(p, &ftrace_commands, list) {
2150 if (strcmp(p->name, command) == 0) {
2151 ret = p->func(func, command, next, enable);
2152 goto out_unlock;
2153 }
64e7c440 2154 }
f6180773
SR
2155 out_unlock:
2156 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 2157
f6180773 2158 return ret;
64e7c440
SR
2159}
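
/*
 * The accepted grammar is thus "<function>[:<command>[:<param>]]".
 * Illustrative usage from the shell (assuming debugfs mounted at
 * /sys/kernel/debug; "mycmd" is a hypothetical command registered
 * via register_ftrace_command()):
 *
 *   echo 'schedule' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo 'sched_*:mycmd:param' > /sys/kernel/debug/tracing/set_ftrace_filter
 */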
2160
e309b41d 2161static ssize_t
41c52c0d
SR
2162ftrace_regex_write(struct file *file, const char __user *ubuf,
2163 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
2164{
2165 struct ftrace_iterator *iter;
689fd8b6 2166 struct trace_parser *parser;
2167 ssize_t ret, read;
5072c59f 2168
4ba7978e 2169 if (!cnt)
5072c59f
SR
2170 return 0;
2171
41c52c0d 2172 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
2173
2174 if (file->f_mode & FMODE_READ) {
2175 struct seq_file *m = file->private_data;
2176 iter = m->private;
2177 } else
2178 iter = file->private_data;
2179
689fd8b6 2180 parser = &iter->parser;
2181 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 2182
4ba7978e 2183 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 2184 !trace_parser_cont(parser)) {
2185 ret = ftrace_process_regex(parser->buffer,
2186 parser->idx, enable);
313254a9 2187 trace_parser_clear(parser);
5072c59f 2188 if (ret)
ed146b25 2189 goto out_unlock;
eda1e328 2190 }
5072c59f 2191
5072c59f 2192 ret = read;
ed146b25 2193out_unlock:
689fd8b6 2194 mutex_unlock(&ftrace_regex_lock);
ed146b25 2195
5072c59f
SR
2196 return ret;
2197}
2198
41c52c0d
SR
2199static ssize_t
2200ftrace_filter_write(struct file *file, const char __user *ubuf,
2201 size_t cnt, loff_t *ppos)
2202{
2203 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2204}
2205
2206static ssize_t
2207ftrace_notrace_write(struct file *file, const char __user *ubuf,
2208 size_t cnt, loff_t *ppos)
2209{
2210 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2211}
2212
2213static void
2214ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2215{
2216 if (unlikely(ftrace_disabled))
2217 return;
2218
2219 mutex_lock(&ftrace_regex_lock);
2220 if (reset)
2221 ftrace_filter_reset(enable);
2222 if (buf)
7f24b31b 2223 ftrace_match_records(buf, len, enable);
41c52c0d
SR
2224 mutex_unlock(&ftrace_regex_lock);
2225}
2226
77a2b37d
SR
2227/**
2228 * ftrace_set_filter - set a function to filter on in ftrace
2229 * @buf - the string that holds the function filter text.
2230 * @len - the length of the string.
2231 * @reset - non zero to reset all filters before applying this filter.
2232 *
2233 * Filters denote which functions should be enabled when tracing is enabled.
2234 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2235 */
e309b41d 2236void ftrace_set_filter(unsigned char *buf, int len, int reset)
77a2b37d 2237{
41c52c0d
SR
2238 ftrace_set_regex(buf, len, reset, 1);
2239}
4eebcc81 2240
41c52c0d
SR
2241/**
2242 * ftrace_set_notrace - set a function to not trace in ftrace
2243 * @buf - the string that holds the function notrace text.
2244 * @len - the length of the string.
2245 * @reset - non zero to reset all filters before applying this filter.
2246 *
2247 * Notrace Filters denote which functions should not be enabled when tracing
2248 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2249 * for tracing.
2250 */
2251void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2252{
2253 ftrace_set_regex(buf, len, reset, 0);
77a2b37d
SR
2254}
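
/*
 * Usage sketch for the two helpers above (illustrative; such calls
 * typically come from a tracer's init path):
 */
#if 0 /* example only */
static void my_setup_filters(void)
{
	static unsigned char flt[] = "sched_*";
	static unsigned char nopat[] = "*lock*";

	/* trace only the scheduler entry points, resetting old filters */
	ftrace_set_filter(flt, sizeof(flt) - 1, 1);
	/* additionally keep the locking primitives out of the trace */
	ftrace_set_notrace(nopat, sizeof(nopat) - 1, 0);
}
#endif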
2255
2af15d6a
SR
2256/*
2257 * command line interface to allow users to set filters on boot up.
2258 */
2259#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2260static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2261static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2262
2263static int __init set_ftrace_notrace(char *str)
2264{
2265 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2266 return 1;
2267}
2268__setup("ftrace_notrace=", set_ftrace_notrace);
2269
2270static int __init set_ftrace_filter(char *str)
2271{
2272 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2273 return 1;
2274}
2275__setup("ftrace_filter=", set_ftrace_filter);
2276
369bc18f 2277#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 2278static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
369bc18f
SA
2279static int __init set_graph_function(char *str)
2280{
06f43d66 2281 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
2282 return 1;
2283}
2284__setup("ftrace_graph_filter=", set_graph_function);
2285
2286static void __init set_ftrace_early_graph(char *buf)
2287{
2288 int ret;
2289 char *func;
2290
2291 while (buf) {
2292 func = strsep(&buf, ",");
2293 /* we allow only one expression at a time */
2294 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2295 func);
2296 if (ret)
2297 printk(KERN_DEBUG "ftrace: function %s not "
2298 "traceable\n", func);
2299 }
2300}
2301#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2302
2af15d6a
SR
2303static void __init set_ftrace_early_filter(char *buf, int enable)
2304{
2305 char *func;
2306
2307 while (buf) {
2308 func = strsep(&buf, ",");
2309 ftrace_set_regex(func, strlen(func), 0, enable);
2310 }
2311}
2312
2313static void __init set_ftrace_early_filters(void)
2314{
2315 if (ftrace_filter_buf[0])
2316 set_ftrace_early_filter(ftrace_filter_buf, 1);
2317 if (ftrace_notrace_buf[0])
2318 set_ftrace_early_filter(ftrace_notrace_buf, 0);
369bc18f
SA
2319#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2320 if (ftrace_graph_buf[0])
2321 set_ftrace_early_graph(ftrace_graph_buf);
2322#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
2323}
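
/*
 * Illustrative kernel command line exercising the boot-time filters
 * above (function names are examples only); each option takes a
 * comma-separated list of expressions:
 *
 *   ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*spin*
 *   ftrace_graph_filter=schedule
 */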
2324
e309b41d 2325static int
41c52c0d 2326ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
2327{
2328 struct seq_file *m = (struct seq_file *)file->private_data;
2329 struct ftrace_iterator *iter;
689fd8b6 2330 struct trace_parser *parser;
5072c59f 2331
41c52c0d 2332 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
2333 if (file->f_mode & FMODE_READ) {
2334 iter = m->private;
2335
2336 seq_release(inode, file);
2337 } else
2338 iter = file->private_data;
2339
689fd8b6 2340 parser = &iter->parser;
2341 if (trace_parser_loaded(parser)) {
2342 parser->buffer[parser->idx] = 0;
2343 ftrace_match_records(parser->buffer, parser->idx, enable);
5072c59f
SR
2344 }
2345
e6ea44e9 2346 mutex_lock(&ftrace_lock);
ee02a2e5 2347 if (ftrace_start_up && ftrace_enabled)
5072c59f 2348 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
e6ea44e9 2349 mutex_unlock(&ftrace_lock);
5072c59f 2350
689fd8b6 2351 trace_parser_put(parser);
5072c59f 2352 kfree(iter);
689fd8b6 2353
41c52c0d 2354 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
2355 return 0;
2356}
2357
41c52c0d
SR
2358static int
2359ftrace_filter_release(struct inode *inode, struct file *file)
2360{
2361 return ftrace_regex_release(inode, file, 1);
2362}
2363
2364static int
2365ftrace_notrace_release(struct inode *inode, struct file *file)
2366{
2367 return ftrace_regex_release(inode, file, 0);
2368}
2369
5e2336a0 2370static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
2371 .open = ftrace_avail_open,
2372 .read = seq_read,
2373 .llseek = seq_lseek,
3be04b47 2374 .release = seq_release_private,
5072c59f
SR
2375};
2376
5e2336a0 2377static const struct file_operations ftrace_failures_fops = {
eb9a7bf0
AS
2378 .open = ftrace_failures_open,
2379 .read = seq_read,
2380 .llseek = seq_lseek,
3be04b47 2381 .release = seq_release_private,
eb9a7bf0
AS
2382};
2383
5e2336a0 2384static const struct file_operations ftrace_filter_fops = {
5072c59f 2385 .open = ftrace_filter_open,
850a80cf 2386 .read = seq_read,
5072c59f 2387 .write = ftrace_filter_write,
41c52c0d 2388 .llseek = ftrace_regex_lseek,
5072c59f
SR
2389 .release = ftrace_filter_release,
2390};
2391
5e2336a0 2392static const struct file_operations ftrace_notrace_fops = {
41c52c0d 2393 .open = ftrace_notrace_open,
850a80cf 2394 .read = seq_read,
41c52c0d
SR
2395 .write = ftrace_notrace_write,
2396 .llseek = ftrace_regex_lseek,
2397 .release = ftrace_notrace_release,
2398};
2399
ea4e2bc4
SR
2400#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2401
2402static DEFINE_MUTEX(graph_lock);
2403
2404int ftrace_graph_count;
2405unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2406
2407static void *
85951842 2408__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 2409{
85951842 2410 if (*pos >= ftrace_graph_count)
ea4e2bc4 2411 return NULL;
a4ec5e0c 2412 return &ftrace_graph_funcs[*pos];
85951842 2413}
ea4e2bc4 2414
85951842
LZ
2415static void *
2416g_next(struct seq_file *m, void *v, loff_t *pos)
2417{
2418 (*pos)++;
2419 return __g_next(m, pos);
ea4e2bc4
SR
2420}
2421
2422static void *g_start(struct seq_file *m, loff_t *pos)
2423{
ea4e2bc4
SR
2424 mutex_lock(&graph_lock);
2425
f9349a8f
FW
2426 /* Nothing to show; tell g_show to print that all functions are enabled */
2427 if (!ftrace_graph_count && !*pos)
2428 return (void *)1;
2429
85951842 2430 return __g_next(m, pos);
ea4e2bc4
SR
2431}
2432
2433static void g_stop(struct seq_file *m, void *p)
2434{
2435 mutex_unlock(&graph_lock);
2436}
2437
2438static int g_show(struct seq_file *m, void *v)
2439{
2440 unsigned long *ptr = v;
ea4e2bc4
SR
2441
2442 if (!ptr)
2443 return 0;
2444
f9349a8f
FW
2445 if (ptr == (unsigned long *)1) {
2446 seq_printf(m, "#### all functions enabled ####\n");
2447 return 0;
2448 }
2449
b375a11a 2450 seq_printf(m, "%ps\n", (void *)*ptr);
ea4e2bc4
SR
2451
2452 return 0;
2453}
2454
88e9d34c 2455static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
2456 .start = g_start,
2457 .next = g_next,
2458 .stop = g_stop,
2459 .show = g_show,
2460};
2461
2462static int
2463ftrace_graph_open(struct inode *inode, struct file *file)
2464{
2465 int ret = 0;
2466
2467 if (unlikely(ftrace_disabled))
2468 return -ENODEV;
2469
2470 mutex_lock(&graph_lock);
2471 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 2472 (file->f_flags & O_TRUNC)) {
ea4e2bc4
SR
2473 ftrace_graph_count = 0;
2474 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2475 }
a4ec5e0c 2476 mutex_unlock(&graph_lock);
ea4e2bc4 2477
a4ec5e0c 2478 if (file->f_mode & FMODE_READ)
ea4e2bc4 2479 ret = seq_open(file, &ftrace_graph_seq_ops);
ea4e2bc4
SR
2480
2481 return ret;
2482}
2483
87827111
LZ
2484static int
2485ftrace_graph_release(struct inode *inode, struct file *file)
2486{
2487 if (file->f_mode & FMODE_READ)
2488 seq_release(inode, file);
2489 return 0;
2490}
2491
ea4e2bc4 2492static int
f9349a8f 2493ftrace_set_func(unsigned long *array, int *idx, char *buffer)
ea4e2bc4 2494{
ea4e2bc4
SR
2495 struct dyn_ftrace *rec;
2496 struct ftrace_page *pg;
f9349a8f 2497 int search_len;
ea4e2bc4 2498 int found = 0;
f9349a8f
FW
2499 int type, not;
2500 char *search;
2501 bool exists;
2502 int i;
ea4e2bc4
SR
2503
2504 if (ftrace_disabled)
2505 return -ENODEV;
2506
f9349a8f 2507 /* decode regex */
3f6fe06d 2508 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
f9349a8f
FW
2509 if (not)
2510 return -EINVAL;
2511
2512 search_len = strlen(search);
2513
52baf119 2514 mutex_lock(&ftrace_lock);
265c831c
SR
2515 do_for_each_ftrace_rec(pg, rec) {
2516
f9349a8f
FW
2517 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2518 break;
2519
265c831c
SR
2520 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2521 continue;
2522
f9349a8f
FW
2523 if (ftrace_match_record(rec, search, search_len, type)) {
2524 /* ensure it is not already in the array */
2525 exists = false;
2526 for (i = 0; i < *idx; i++)
2527 if (array[i] == rec->ip) {
2528 exists = true;
265c831c
SR
2529 break;
2530 }
91baf628 2531 if (!exists)
f9349a8f 2532 array[(*idx)++] = rec->ip;
91baf628 2533 found = 1;
ea4e2bc4 2534 }
265c831c 2535 } while_for_each_ftrace_rec();
f9349a8f 2536
52baf119 2537 mutex_unlock(&ftrace_lock);
ea4e2bc4
SR
2538
2539 return found ? 0 : -EINVAL;
2540}
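
/*
 * ftrace_set_func() backs the set_graph_function file below: each
 * write appends matching function addresses to the array until
 * FTRACE_GRAPH_MAX_FUNCS is reached. Illustrative usage (debugfs
 * mount point assumed; appending assumes the file is opened
 * without O_TRUNC):
 *
 *   echo schedule > /sys/kernel/debug/tracing/set_graph_function
 *   echo 'sys_*' >> /sys/kernel/debug/tracing/set_graph_function
 */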
2541
2542static ssize_t
2543ftrace_graph_write(struct file *file, const char __user *ubuf,
2544 size_t cnt, loff_t *ppos)
2545{
689fd8b6 2546 struct trace_parser parser;
4ba7978e 2547 ssize_t read, ret;
ea4e2bc4
SR
2548
2549 if (!cnt)
2550 return 0;
2551
2552 mutex_lock(&graph_lock);
2553
2554 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2555 ret = -EBUSY;
1eb90f13 2556 goto out_unlock;
ea4e2bc4
SR
2557 }
2558
689fd8b6 2559 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2560 ret = -ENOMEM;
1eb90f13 2561 goto out_unlock;
ea4e2bc4
SR
2562 }
2563
689fd8b6 2564 read = trace_get_user(&parser, ubuf, cnt, ppos);
ea4e2bc4 2565
4ba7978e 2566 if (read >= 0 && trace_parser_loaded((&parser))) {
689fd8b6 2567 parser.buffer[parser.idx] = 0;
2568
2569 /* we allow only one expression at a time */
a4ec5e0c 2570 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
689fd8b6 2571 parser.buffer);
ea4e2bc4 2572 if (ret)
1eb90f13 2573 goto out_free;
ea4e2bc4 2574 }
ea4e2bc4
SR
2575
2576 ret = read;
1eb90f13
LZ
2577
2578out_free:
689fd8b6 2579 trace_parser_put(&parser);
1eb90f13 2580out_unlock:
ea4e2bc4
SR
2581 mutex_unlock(&graph_lock);
2582
2583 return ret;
2584}
2585
2586static const struct file_operations ftrace_graph_fops = {
87827111
LZ
2587 .open = ftrace_graph_open,
2588 .read = seq_read,
2589 .write = ftrace_graph_write,
2590 .release = ftrace_graph_release,
ea4e2bc4
SR
2591};
2592#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2593
df4fc315 2594static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 2595{
5072c59f 2596
5452af66
FW
2597 trace_create_file("available_filter_functions", 0444,
2598 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 2599
5452af66
FW
2600 trace_create_file("failures", 0444,
2601 d_tracer, NULL, &ftrace_failures_fops);
eb9a7bf0 2602
5452af66
FW
2603 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2604 NULL, &ftrace_filter_fops);
41c52c0d 2605
5452af66 2606 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
41c52c0d 2607 NULL, &ftrace_notrace_fops);
ad90c0e3 2608
ea4e2bc4 2609#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5452af66 2610 trace_create_file("set_graph_function", 0444, d_tracer,
ea4e2bc4
SR
2611 NULL,
2612 &ftrace_graph_fops);
ea4e2bc4
SR
2613#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2614
5072c59f
SR
2615 return 0;
2616}
2617
5cb084bb 2618static int ftrace_process_locs(struct module *mod,
31e88909 2619 unsigned long *start,
68bf21aa
SR
2620 unsigned long *end)
2621{
2622 unsigned long *p;
2623 unsigned long addr;
2624 unsigned long flags;
2625
e6ea44e9 2626 mutex_lock(&ftrace_lock);
68bf21aa
SR
2627 p = start;
2628 while (p < end) {
2629 addr = ftrace_call_adjust(*p++);
20e5227e
SR
2630 /*
2631 * Some architecture linkers will pad between
2632 * the different mcount_loc sections of different
2633 * object files to satisfy alignments.
2634 * Skip any NULL pointers.
2635 */
2636 if (!addr)
2637 continue;
68bf21aa 2638 ftrace_record_ip(addr);
68bf21aa
SR
2639 }
2640
08f5ac90 2641 /* disable interrupts to prevent kstop machine */
68bf21aa 2642 local_irq_save(flags);
31e88909 2643 ftrace_update_code(mod);
68bf21aa 2644 local_irq_restore(flags);
e6ea44e9 2645 mutex_unlock(&ftrace_lock);
68bf21aa
SR
2646
2647 return 0;
2648}
2649
93eb677d 2650#ifdef CONFIG_MODULES
e7247a15 2651void ftrace_release_mod(struct module *mod)
93eb677d
SR
2652{
2653 struct dyn_ftrace *rec;
2654 struct ftrace_page *pg;
93eb677d 2655
e7247a15 2656 if (ftrace_disabled)
93eb677d
SR
2657 return;
2658
2659 mutex_lock(&ftrace_lock);
2660 do_for_each_ftrace_rec(pg, rec) {
e7247a15 2661 if (within_module_core(rec->ip, mod)) {
93eb677d
SR
2662 /*
2663 * rec->ip is changed in ftrace_free_rec()
2664 * It should not be between s and e if the record was freed.
2665 */
2666 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2667 ftrace_free_rec(rec);
2668 }
2669 } while_for_each_ftrace_rec();
2670 mutex_unlock(&ftrace_lock);
2671}
2672
2673static void ftrace_init_module(struct module *mod,
2674 unsigned long *start, unsigned long *end)
90d595fe 2675{
00fd61ae 2676 if (ftrace_disabled || start == end)
fed1939c 2677 return;
5cb084bb 2678 ftrace_process_locs(mod, start, end);
90d595fe
SR
2679}
2680
93eb677d
SR
2681static int ftrace_module_notify(struct notifier_block *self,
2682 unsigned long val, void *data)
2683{
2684 struct module *mod = data;
2685
2686 switch (val) {
2687 case MODULE_STATE_COMING:
2688 ftrace_init_module(mod, mod->ftrace_callsites,
2689 mod->ftrace_callsites +
2690 mod->num_ftrace_callsites);
2691 break;
2692 case MODULE_STATE_GOING:
e7247a15 2693 ftrace_release_mod(mod);
93eb677d
SR
2694 break;
2695 }
2696
2697 return 0;
2698}
2699#else
2700static int ftrace_module_notify(struct notifier_block *self,
2701 unsigned long val, void *data)
2702{
2703 return 0;
2704}
2705#endif /* CONFIG_MODULES */
2706
2707struct notifier_block ftrace_module_nb = {
2708 .notifier_call = ftrace_module_notify,
2709 .priority = 0,
2710};
2711
68bf21aa
SR
2712extern unsigned long __start_mcount_loc[];
2713extern unsigned long __stop_mcount_loc[];
2714
2715void __init ftrace_init(void)
2716{
2717 unsigned long count, addr, flags;
2718 int ret;
2719
2720 /* Keep the ftrace pointer to the stub */
2721 addr = (unsigned long)ftrace_stub;
2722
2723 local_irq_save(flags);
2724 ftrace_dyn_arch_init(&addr);
2725 local_irq_restore(flags);
2726
2727 /* ftrace_dyn_arch_init places the return code in addr */
2728 if (addr)
2729 goto failed;
2730
2731 count = __stop_mcount_loc - __start_mcount_loc;
2732
2733 ret = ftrace_dyn_table_alloc(count);
2734 if (ret)
2735 goto failed;
2736
2737 last_ftrace_enabled = ftrace_enabled = 1;
2738
5cb084bb 2739 ret = ftrace_process_locs(NULL,
31e88909 2740 __start_mcount_loc,
68bf21aa
SR
2741 __stop_mcount_loc);
2742
93eb677d 2743 ret = register_module_notifier(&ftrace_module_nb);
24ed0c4b 2744 if (ret)
93eb677d
SR
2745 pr_warning("Failed to register ftrace module notifier\n");
2746
2af15d6a
SR
2747 set_ftrace_early_filters();
2748
68bf21aa
SR
2749 return;
2750 failed:
2751 ftrace_disabled = 1;
2752}
68bf21aa 2753
3d083395 2754#else
0b6e4d56
FW
2755
2756static int __init ftrace_nodyn_init(void)
2757{
2758 ftrace_enabled = 1;
2759 return 0;
2760}
2761device_initcall(ftrace_nodyn_init);
2762
df4fc315
SR
2763static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2764static inline void ftrace_startup_enable(int command) { }
5a45cfe1
SR
2765/* Keep as macros so we do not need to define the commands */
2766# define ftrace_startup(command) do { } while (0)
2767# define ftrace_shutdown(command) do { } while (0)
c7aafc54
IM
2768# define ftrace_startup_sysctl() do { } while (0)
2769# define ftrace_shutdown_sysctl() do { } while (0)
3d083395
SR
2770#endif /* CONFIG_DYNAMIC_FTRACE */
2771
e32d8956 2772static void clear_ftrace_swapper(void)
978f3a45
SR
2773{
2774 struct task_struct *p;
e32d8956 2775 int cpu;
978f3a45 2776
e32d8956
SR
2777 get_online_cpus();
2778 for_each_online_cpu(cpu) {
2779 p = idle_task(cpu);
978f3a45 2780 clear_tsk_trace_trace(p);
e32d8956
SR
2781 }
2782 put_online_cpus();
2783}
978f3a45 2784
e32d8956
SR
2785static void set_ftrace_swapper(void)
2786{
2787 struct task_struct *p;
2788 int cpu;
2789
2790 get_online_cpus();
2791 for_each_online_cpu(cpu) {
2792 p = idle_task(cpu);
2793 set_tsk_trace_trace(p);
2794 }
2795 put_online_cpus();
978f3a45
SR
2796}
2797
e32d8956
SR
2798static void clear_ftrace_pid(struct pid *pid)
2799{
2800 struct task_struct *p;
2801
229c4ef8 2802 rcu_read_lock();
e32d8956
SR
2803 do_each_pid_task(pid, PIDTYPE_PID, p) {
2804 clear_tsk_trace_trace(p);
2805 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
2806 rcu_read_unlock();
2807
e32d8956
SR
2808 put_pid(pid);
2809}
2810
2811static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
2812{
2813 struct task_struct *p;
2814
229c4ef8 2815 rcu_read_lock();
978f3a45
SR
2816 do_each_pid_task(pid, PIDTYPE_PID, p) {
2817 set_tsk_trace_trace(p);
2818 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 2819 rcu_read_unlock();
978f3a45
SR
2820}
2821
756d17ee 2822static void clear_ftrace_pid_task(struct pid *pid)
e32d8956 2823{
756d17ee 2824 if (pid == ftrace_swapper_pid)
e32d8956
SR
2825 clear_ftrace_swapper();
2826 else
756d17ee 2827 clear_ftrace_pid(pid);
e32d8956
SR
2828}
2829
2830static void set_ftrace_pid_task(struct pid *pid)
2831{
2832 if (pid == ftrace_swapper_pid)
2833 set_ftrace_swapper();
2834 else
2835 set_ftrace_pid(pid);
2836}
2837
756d17ee 2838static int ftrace_pid_add(int p)
df4fc315 2839{
978f3a45 2840 struct pid *pid;
756d17ee 2841 struct ftrace_pid *fpid;
2842 int ret = -EINVAL;
df4fc315 2843
756d17ee 2844 mutex_lock(&ftrace_lock);
df4fc315 2845
756d17ee 2846 if (!p)
2847 pid = ftrace_swapper_pid;
2848 else
2849 pid = find_get_pid(p);
df4fc315 2850
756d17ee 2851 if (!pid)
2852 goto out;
df4fc315 2853
756d17ee 2854 ret = 0;
df4fc315 2855
756d17ee 2856 list_for_each_entry(fpid, &ftrace_pids, list)
2857 if (fpid->pid == pid)
2858 goto out_put;
978f3a45 2859
756d17ee 2860 ret = -ENOMEM;
df4fc315 2861
756d17ee 2862 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2863 if (!fpid)
2864 goto out_put;
df4fc315 2865
756d17ee 2866 list_add(&fpid->list, &ftrace_pids);
2867 fpid->pid = pid;
0ef8cde5 2868
756d17ee 2869 set_ftrace_pid_task(pid);
978f3a45 2870
756d17ee 2871 ftrace_update_pid_func();
2872 ftrace_startup_enable(0);
2873
2874 mutex_unlock(&ftrace_lock);
2875 return 0;
2876
2877out_put:
2878 if (pid != ftrace_swapper_pid)
2879 put_pid(pid);
978f3a45 2880
756d17ee 2881out:
2882 mutex_unlock(&ftrace_lock);
2883 return ret;
2884}
2885
2886static void ftrace_pid_reset(void)
2887{
2888 struct ftrace_pid *fpid, *safe;
978f3a45 2889
756d17ee 2890 mutex_lock(&ftrace_lock);
2891 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2892 struct pid *pid = fpid->pid;
2893
2894 clear_ftrace_pid_task(pid);
2895
2896 list_del(&fpid->list);
2897 kfree(fpid);
df4fc315
SR
2898 }
2899
df4fc315
SR
2900 ftrace_update_pid_func();
2901 ftrace_startup_enable(0);
2902
e6ea44e9 2903 mutex_unlock(&ftrace_lock);
756d17ee 2904}
df4fc315 2905
756d17ee 2906static void *fpid_start(struct seq_file *m, loff_t *pos)
2907{
2908 mutex_lock(&ftrace_lock);
2909
2910 if (list_empty(&ftrace_pids) && (!*pos))
2911 return (void *) 1;
2912
2913 return seq_list_start(&ftrace_pids, *pos);
2914}
2915
2916static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2917{
2918 if (v == (void *)1)
2919 return NULL;
2920
2921 return seq_list_next(v, &ftrace_pids, pos);
2922}
2923
2924static void fpid_stop(struct seq_file *m, void *p)
2925{
2926 mutex_unlock(&ftrace_lock);
2927}
2928
2929static int fpid_show(struct seq_file *m, void *v)
2930{
2931 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2932
2933 if (v == (void *)1) {
2934 seq_printf(m, "no pid\n");
2935 return 0;
2936 }
2937
2938 if (fpid->pid == ftrace_swapper_pid)
2939 seq_printf(m, "swapper tasks\n");
2940 else
2941 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2942
2943 return 0;
2944}
2945
2946static const struct seq_operations ftrace_pid_sops = {
2947 .start = fpid_start,
2948 .next = fpid_next,
2949 .stop = fpid_stop,
2950 .show = fpid_show,
2951};
2952
2953static int
2954ftrace_pid_open(struct inode *inode, struct file *file)
2955{
2956 int ret = 0;
2957
2958 if ((file->f_mode & FMODE_WRITE) &&
2959 (file->f_flags & O_TRUNC))
2960 ftrace_pid_reset();
2961
2962 if (file->f_mode & FMODE_READ)
2963 ret = seq_open(file, &ftrace_pid_sops);
2964
2965 return ret;
2966}
2967
df4fc315
SR
2968static ssize_t
2969ftrace_pid_write(struct file *filp, const char __user *ubuf,
2970 size_t cnt, loff_t *ppos)
2971{
457dc928 2972 char buf[64], *tmp;
df4fc315
SR
2973 long val;
2974 int ret;
2975
2976 if (cnt >= sizeof(buf))
2977 return -EINVAL;
2978
2979 if (copy_from_user(&buf, ubuf, cnt))
2980 return -EFAULT;
2981
2982 buf[cnt] = 0;
2983
756d17ee 2984 /*
2985 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
2986 * to clear the filter quietly.
2987 */
457dc928
IM
2988 tmp = strstrip(buf);
2989 if (strlen(tmp) == 0)
756d17ee 2990 return 1;
2991
457dc928 2992 ret = strict_strtol(tmp, 10, &val);
df4fc315
SR
2993 if (ret < 0)
2994 return ret;
2995
756d17ee 2996 ret = ftrace_pid_add(val);
df4fc315 2997
756d17ee 2998 return ret ? ret : cnt;
2999}
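
/*
 * Illustrative usage of the pid file (debugfs mount point assumed).
 * Opening with '>' truncates, which resets the pid list before the
 * new pid is added; '>>' accumulates. A pid of 0 selects the idle
 * (swapper) tasks, and an empty write leaves the cleared list alone:
 *
 *   echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *   echo 5678 >> /sys/kernel/debug/tracing/set_ftrace_pid
 *   echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *   echo > /sys/kernel/debug/tracing/set_ftrace_pid
 */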
df4fc315 3000
756d17ee 3001static int
3002ftrace_pid_release(struct inode *inode, struct file *file)
3003{
3004 if (file->f_mode & FMODE_READ)
3005 seq_release(inode, file);
df4fc315 3006
756d17ee 3007 return 0;
df4fc315
SR
3008}
3009
5e2336a0 3010static const struct file_operations ftrace_pid_fops = {
756d17ee 3011 .open = ftrace_pid_open,
3012 .write = ftrace_pid_write,
3013 .read = seq_read,
3014 .llseek = seq_lseek,
3015 .release = ftrace_pid_release,
df4fc315
SR
3016};
3017
3018static __init int ftrace_init_debugfs(void)
3019{
3020 struct dentry *d_tracer;
df4fc315
SR
3021
3022 d_tracer = tracing_init_dentry();
3023 if (!d_tracer)
3024 return 0;
3025
3026 ftrace_init_dyn_debugfs(d_tracer);
3027
5452af66
FW
3028 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3029 NULL, &ftrace_pid_fops);
493762fc
SR
3030
3031 ftrace_profile_debugfs(d_tracer);
3032
df4fc315
SR
3033 return 0;
3034}
df4fc315
SR
3035fs_initcall(ftrace_init_debugfs);
3036
a2bb6a3d 3037/**
81adbdc0 3038 * ftrace_kill - kill ftrace
a2bb6a3d
SR
3039 *
3040 * This function should be used by panic code. It stops ftrace
3041 * but in a not-so-nice way: it disables tracing immediately,
3042 * with no synchronization or cleanup.
3043 */
81adbdc0 3044void ftrace_kill(void)
a2bb6a3d
SR
3045{
3046 ftrace_disabled = 1;
3047 ftrace_enabled = 0;
a2bb6a3d
SR
3048 clear_ftrace_function();
3049}
3050
16444a8a 3051/**
3d083395
SR
3052 * register_ftrace_function - register a function for profiling
3053 * @ops - ops structure that holds the function for profiling.
16444a8a 3054 *
3d083395
SR
3055 * Register a function to be called by all functions in the
3056 * kernel.
3057 *
3058 * Note: @ops->func and all the functions it calls must be labeled
3059 * with "notrace", otherwise it will go into a
3060 * recursive loop.
16444a8a 3061 */
3d083395 3062int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 3063{
b0fc494f
SR
3064 int ret;
3065
4eebcc81
SR
3066 if (unlikely(ftrace_disabled))
3067 return -1;
3068
e6ea44e9 3069 mutex_lock(&ftrace_lock);
e7d3737e 3070
b0fc494f 3071 ret = __register_ftrace_function(ops);
5a45cfe1 3072 ftrace_startup(0);
b0fc494f 3073
e6ea44e9 3074 mutex_unlock(&ftrace_lock);
b0fc494f 3075 return ret;
3d083395
SR
3076}
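
/*
 * Minimal caller sketch (illustrative names only; the callback
 * signature matches ftrace_func_t and must itself be notrace, per
 * the note above):
 */
#if 0 /* example only */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* invoked on entry to every traced function */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}
#endif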
3077
3078/**
32632920 3079 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
3080 * @ops - ops structure that holds the function to unregister
3081 *
3082 * Unregister a function that was added to be called by ftrace profiling.
3083 */
3084int unregister_ftrace_function(struct ftrace_ops *ops)
3085{
3086 int ret;
3087
e6ea44e9 3088 mutex_lock(&ftrace_lock);
3d083395 3089 ret = __unregister_ftrace_function(ops);
5a45cfe1 3090 ftrace_shutdown(0);
e6ea44e9 3091 mutex_unlock(&ftrace_lock);
b0fc494f
SR
3092
3093 return ret;
3094}
3095
e309b41d 3096int
b0fc494f 3097ftrace_enable_sysctl(struct ctl_table *table, int write,
8d65af78 3098 void __user *buffer, size_t *lenp,
b0fc494f
SR
3099 loff_t *ppos)
3100{
3101 int ret;
3102
4eebcc81
SR
3103 if (unlikely(ftrace_disabled))
3104 return -ENODEV;
3105
e6ea44e9 3106 mutex_lock(&ftrace_lock);
b0fc494f 3107
8d65af78 3108 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 3109
a32c7765 3110 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
3111 goto out;
3112
a32c7765 3113 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f
SR
3114
3115 if (ftrace_enabled) {
3116
3117 ftrace_startup_sysctl();
3118
3119 /* we are starting ftrace again */
3120 if (ftrace_list != &ftrace_list_end) {
3121 if (ftrace_list->next == &ftrace_list_end)
3122 ftrace_trace_function = ftrace_list->func;
3123 else
3124 ftrace_trace_function = ftrace_list_func;
3125 }
3126
3127 } else {
3128 /* stopping ftrace calls (just send to ftrace_stub) */
3129 ftrace_trace_function = ftrace_stub;
3130
3131 ftrace_shutdown_sysctl();
3132 }
3133
3134 out:
e6ea44e9 3135 mutex_unlock(&ftrace_lock);
3d083395 3136 return ret;
16444a8a 3137}
f17845e5 3138
fb52607a 3139#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 3140
597af815 3141static int ftrace_graph_active;
4a2b8dda 3142static struct notifier_block ftrace_suspend_notifier;
e7d3737e 3143
e49dc19c
SR
3144int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3145{
3146 return 0;
3147}
3148
287b6e68
FW
3149/* The callbacks that hook a function */
3150trace_func_graph_ret_t ftrace_graph_return =
3151 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 3152trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
3153
3154/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3155static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3156{
3157 int i;
3158 int ret = 0;
3159 unsigned long flags;
3160 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3161 struct task_struct *g, *t;
3162
3163 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3164 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3165 * sizeof(struct ftrace_ret_stack),
3166 GFP_KERNEL);
3167 if (!ret_stack_list[i]) {
3168 start = 0;
3169 end = i;
3170 ret = -ENOMEM;
3171 goto free;
3172 }
3173 }
3174
3175 read_lock_irqsave(&tasklist_lock, flags);
3176 do_each_thread(g, t) {
3177 if (start == end) {
3178 ret = -EAGAIN;
3179 goto unlock;
3180 }
3181
3182 if (t->ret_stack == NULL) {
380c4b14 3183 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 3184 atomic_set(&t->trace_overrun, 0);
26c01624
SR
3185 t->curr_ret_stack = -1;
3186 /* Make sure the tasks see the -1 first: */
3187 smp_wmb();
3188 t->ret_stack = ret_stack_list[start++];
f201ae23
FW
3189 }
3190 } while_each_thread(g, t);
3191
3192unlock:
3193 read_unlock_irqrestore(&tasklist_lock, flags);
3194free:
3195 for (i = start; i < end; i++)
3196 kfree(ret_stack_list[i]);
3197 return ret;
3198}
3199
8aef2d28
SR
3200static void
3201ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3202 struct task_struct *next)
3203{
3204 unsigned long long timestamp;
3205 int index;
3206
be6f164a
SR
3207 /*
3208 * Does the user want to count the time a function was asleep?
3209 * If so, do not update the time stamps.
3210 */
3211 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3212 return;
3213
8aef2d28
SR
3214 timestamp = trace_clock_local();
3215
3216 prev->ftrace_timestamp = timestamp;
3217
3218 /* only process tasks that we timestamped */
3219 if (!next->ftrace_timestamp)
3220 return;
3221
3222 /*
3223 * Update all the counters in next to make up for the
3224 * time next was sleeping.
3225 */
3226 timestamp -= next->ftrace_timestamp;
3227
3228 for (index = next->curr_ret_stack; index >= 0; index--)
3229 next->ret_stack[index].calltime += timestamp;
3230}
3231
f201ae23 3232/* Allocate a return stack for each task */
fb52607a 3233static int start_graph_tracing(void)
f201ae23
FW
3234{
3235 struct ftrace_ret_stack **ret_stack_list;
5b058bcd 3236 int ret, cpu;
f201ae23
FW
3237
3238 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3239 sizeof(struct ftrace_ret_stack *),
3240 GFP_KERNEL);
3241
3242 if (!ret_stack_list)
3243 return -ENOMEM;
3244
5b058bcd 3245 /* The cpu_boot init_task->ret_stack will never be freed */
179c498a
SR
3246 for_each_online_cpu(cpu) {
3247 if (!idle_task(cpu)->ret_stack)
3248 ftrace_graph_init_task(idle_task(cpu));
3249 }
5b058bcd 3250
f201ae23
FW
3251 do {
3252 ret = alloc_retstack_tasklist(ret_stack_list);
3253 } while (ret == -EAGAIN);
3254
8aef2d28
SR
3255 if (!ret) {
3256 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3257 if (ret)
3258 pr_info("ftrace_graph: Couldn't activate tracepoint"
3259 " probe to kernel_sched_switch\n");
3260 }
3261
f201ae23
FW
3262 kfree(ret_stack_list);
3263 return ret;
3264}
3265
4a2b8dda
FW
3266/*
3267 * Hibernation protection.
3268 * The state of the current task is too unstable during
3269 * suspend/restore to disk. We want to protect against that.
3270 */
3271static int
3272ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3273 void *unused)
3274{
3275 switch (state) {
3276 case PM_HIBERNATION_PREPARE:
3277 pause_graph_tracing();
3278 break;
3279
3280 case PM_POST_HIBERNATION:
3281 unpause_graph_tracing();
3282 break;
3283 }
3284 return NOTIFY_DONE;
3285}
3286
287b6e68
FW
3287int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3288 trace_func_graph_ent_t entryfunc)
15e6cb36 3289{
e7d3737e
FW
3290 int ret = 0;
3291
e6ea44e9 3292 mutex_lock(&ftrace_lock);
e7d3737e 3293
05ce5818 3294 /* we currently allow only one tracer registered at a time */
597af815 3295 if (ftrace_graph_active) {
05ce5818
SR
3296 ret = -EBUSY;
3297 goto out;
3298 }
3299
4a2b8dda
FW
3300 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3301 register_pm_notifier(&ftrace_suspend_notifier);
3302
597af815 3303 ftrace_graph_active++;
fb52607a 3304 ret = start_graph_tracing();
f201ae23 3305 if (ret) {
597af815 3306 ftrace_graph_active--;
f201ae23
FW
3307 goto out;
3308 }
e53a6319 3309
287b6e68
FW
3310 ftrace_graph_return = retfunc;
3311 ftrace_graph_entry = entryfunc;
e53a6319 3312
5a45cfe1 3313 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
3314
3315out:
e6ea44e9 3316 mutex_unlock(&ftrace_lock);
e7d3737e 3317 return ret;
15e6cb36
FW
3318}
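
/*
 * Registration sketch (illustrative names; the entry signature
 * matches ftrace_graph_entry_stub() above, the return-hook
 * signature is assumed from trace_func_graph_ret_t):
 */
#if 0 /* example only */
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* nonzero: record this call (the stub above returns 0) */
	return 1;
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called as each traced function returns */
}

static int my_graph_init(void)
{
	/* only one graph tracer may be active at a time: -EBUSY otherwise */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif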
3319
fb52607a 3320void unregister_ftrace_graph(void)
15e6cb36 3321{
e6ea44e9 3322 mutex_lock(&ftrace_lock);
e7d3737e 3323
597af815 3324 if (unlikely(!ftrace_graph_active))
2aad1b76
SR
3325 goto out;
3326
597af815 3327 ftrace_graph_active--;
8aef2d28 3328 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
287b6e68 3329 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 3330 ftrace_graph_entry = ftrace_graph_entry_stub;
5a45cfe1 3331 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
4a2b8dda 3332 unregister_pm_notifier(&ftrace_suspend_notifier);
e7d3737e 3333
2aad1b76 3334 out:
e6ea44e9 3335 mutex_unlock(&ftrace_lock);
15e6cb36 3336}
f201ae23
FW
3337
3338/* Allocate a return stack for newly created task */
fb52607a 3339void ftrace_graph_init_task(struct task_struct *t)
f201ae23 3340{
84047e36
SR
3341 /* Make sure we do not use the parent ret_stack */
3342 t->ret_stack = NULL;
3343
597af815 3344 if (ftrace_graph_active) {
82310a32
SR
3345 struct ftrace_ret_stack *ret_stack;
3346
3347 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
f201ae23
FW
3348 * sizeof(struct ftrace_ret_stack),
3349 GFP_KERNEL);
82310a32 3350 if (!ret_stack)
f201ae23
FW
3351 return;
3352 t->curr_ret_stack = -1;
380c4b14 3353 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 3354 atomic_set(&t->trace_overrun, 0);
8aef2d28 3355 t->ftrace_timestamp = 0;
82310a32
SR
3356 /* make curr_ret_stack visible before we add the ret_stack */
3357 smp_wmb();
3358 t->ret_stack = ret_stack;
84047e36 3359 }
f201ae23
FW
3360}
3361
fb52607a 3362void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 3363{
eae849ca
FW
3364 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3365
f201ae23 3366 t->ret_stack = NULL;
eae849ca
FW
3367 /* NULL must become visible to IRQs before we free it: */
3368 barrier();
3369
3370 kfree(ret_stack);
f201ae23 3371}
14a866c5
SR
3372
3373void ftrace_graph_stop(void)
3374{
3375 ftrace_stop();
3376}
15e6cb36 3377#endif