]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - kernel/trace/ftrace.c
ftrace: don't try to __ftrace_replace_code on !FTRACE_FL_CONVERTED rec
[mirror_ubuntu-artful-kernel.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
f22f9a89 25#include <linux/kprobes.h>
2d8b820b 26#include <linux/ftrace.h>
b0fc494f 27#include <linux/sysctl.h>
5072c59f 28#include <linux/ctype.h>
3d083395 29#include <linux/list.h>
59df055f 30#include <linux/hash.h>
3d083395 31
395a59d0
AS
32#include <asm/ftrace.h>
33
3d083395 34#include "trace.h"
16444a8a 35
6912896e
SR
36#define FTRACE_WARN_ON(cond) \
37 do { \
38 if (WARN_ON(cond)) \
39 ftrace_kill(); \
40 } while (0)
41
42#define FTRACE_WARN_ON_ONCE(cond) \
43 do { \
44 if (WARN_ON_ONCE(cond)) \
45 ftrace_kill(); \
46 } while (0)
47
8fc0c701
SR
48/* hash bits for specific function selection */
49#define FTRACE_HASH_BITS 7
50#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
51
4eebcc81
SR
52/* ftrace_enabled is a method to turn ftrace on or off */
53int ftrace_enabled __read_mostly;
d61f82d0 54static int last_ftrace_enabled;
b0fc494f 55
60a7ecf4
SR
56/* Quick disabling of function tracer. */
57int function_trace_stop;
58
4eebcc81
SR
59/*
60 * ftrace_disabled is set when an anomaly is discovered.
61 * ftrace_disabled is much stronger than ftrace_enabled.
62 */
63static int ftrace_disabled __read_mostly;
64
52baf119 65static DEFINE_MUTEX(ftrace_lock);
b0fc494f 66
16444a8a
ACM
67static struct ftrace_ops ftrace_list_end __read_mostly =
68{
69 .func = ftrace_stub,
70};
71
72static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 74ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 75ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 76
f2252935 77static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
78{
79 struct ftrace_ops *op = ftrace_list;
80
81 /* in case someone actually ports this to alpha! */
82 read_barrier_depends();
83
84 while (op != &ftrace_list_end) {
85 /* silly alpha */
86 read_barrier_depends();
87 op->func(ip, parent_ip);
88 op = op->next;
89 };
90}
91
df4fc315
SR
92static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93{
0ef8cde5 94 if (!test_tsk_trace_trace(current))
df4fc315
SR
95 return;
96
97 ftrace_pid_function(ip, parent_ip);
98}
99
100static void set_ftrace_pid_function(ftrace_func_t func)
101{
102 /* do not set ftrace_pid_function to itself! */
103 if (func != ftrace_pid_func)
104 ftrace_pid_function = func;
105}
106
16444a8a 107/**
3d083395 108 * clear_ftrace_function - reset the ftrace function
16444a8a 109 *
3d083395
SR
110 * This NULLs the ftrace function and in essence stops
111 * tracing. There may be lag
16444a8a 112 */
3d083395 113void clear_ftrace_function(void)
16444a8a 114{
3d083395 115 ftrace_trace_function = ftrace_stub;
60a7ecf4 116 __ftrace_trace_function = ftrace_stub;
df4fc315 117 ftrace_pid_function = ftrace_stub;
3d083395
SR
118}
119
60a7ecf4
SR
120#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121/*
122 * For those archs that do not test ftrace_trace_stop in their
123 * mcount call site, we need to do it from C.
124 */
125static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126{
127 if (function_trace_stop)
128 return;
129
130 __ftrace_trace_function(ip, parent_ip);
131}
132#endif
133
e309b41d 134static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 135{
16444a8a
ACM
136 ops->next = ftrace_list;
137 /*
138 * We are entering ops into the ftrace_list but another
139 * CPU might be walking that list. We need to make sure
140 * the ops->next pointer is valid before another CPU sees
141 * the ops pointer included into the ftrace_list.
142 */
143 smp_wmb();
144 ftrace_list = ops;
3d083395 145
b0fc494f 146 if (ftrace_enabled) {
df4fc315
SR
147 ftrace_func_t func;
148
149 if (ops->next == &ftrace_list_end)
150 func = ops->func;
151 else
152 func = ftrace_list_func;
153
978f3a45 154 if (ftrace_pid_trace) {
df4fc315
SR
155 set_ftrace_pid_function(func);
156 func = ftrace_pid_func;
157 }
158
b0fc494f
SR
159 /*
160 * For one func, simply call it directly.
161 * For more than one func, call the chain.
162 */
60a7ecf4 163#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 164 ftrace_trace_function = func;
60a7ecf4 165#else
df4fc315 166 __ftrace_trace_function = func;
60a7ecf4
SR
167 ftrace_trace_function = ftrace_test_stop_func;
168#endif
b0fc494f 169 }
3d083395 170
16444a8a
ACM
171 return 0;
172}
173
e309b41d 174static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 175{
16444a8a 176 struct ftrace_ops **p;
16444a8a
ACM
177
178 /*
3d083395
SR
179 * If we are removing the last function, then simply point
180 * to the ftrace_stub.
16444a8a
ACM
181 */
182 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
183 ftrace_trace_function = ftrace_stub;
184 ftrace_list = &ftrace_list_end;
e6ea44e9 185 return 0;
16444a8a
ACM
186 }
187
188 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
189 if (*p == ops)
190 break;
191
e6ea44e9
SR
192 if (*p != ops)
193 return -1;
16444a8a
ACM
194
195 *p = (*p)->next;
196
b0fc494f
SR
197 if (ftrace_enabled) {
198 /* If we only have one func left, then call that directly */
df4fc315
SR
199 if (ftrace_list->next == &ftrace_list_end) {
200 ftrace_func_t func = ftrace_list->func;
201
978f3a45 202 if (ftrace_pid_trace) {
df4fc315
SR
203 set_ftrace_pid_function(func);
204 func = ftrace_pid_func;
205 }
206#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
207 ftrace_trace_function = func;
208#else
209 __ftrace_trace_function = func;
210#endif
211 }
b0fc494f 212 }
16444a8a 213
e6ea44e9 214 return 0;
3d083395
SR
215}
216
df4fc315
SR
217static void ftrace_update_pid_func(void)
218{
219 ftrace_func_t func;
220
df4fc315 221 if (ftrace_trace_function == ftrace_stub)
10dd3ebe 222 return;
df4fc315
SR
223
224 func = ftrace_trace_function;
225
978f3a45 226 if (ftrace_pid_trace) {
df4fc315
SR
227 set_ftrace_pid_function(func);
228 func = ftrace_pid_func;
229 } else {
66eafebc
LW
230 if (func == ftrace_pid_func)
231 func = ftrace_pid_function;
df4fc315
SR
232 }
233
234#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
235 ftrace_trace_function = func;
236#else
237 __ftrace_trace_function = func;
238#endif
df4fc315
SR
239}
240
73d3fd96
IM
241/* set when tracing only a pid */
242struct pid *ftrace_pid_trace;
243static struct pid * const ftrace_swapper_pid = &init_struct_pid;
244
3d083395 245#ifdef CONFIG_DYNAMIC_FTRACE
73d3fd96 246
99ecdc43 247#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 248# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
249#endif
250
8fc0c701
SR
251static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
252
b6887d79 253struct ftrace_func_probe {
8fc0c701 254 struct hlist_node node;
b6887d79 255 struct ftrace_probe_ops *ops;
8fc0c701
SR
256 unsigned long flags;
257 unsigned long ip;
258 void *data;
259 struct rcu_head rcu;
260};
261
262
d61f82d0
SR
263enum {
264 FTRACE_ENABLE_CALLS = (1 << 0),
265 FTRACE_DISABLE_CALLS = (1 << 1),
266 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
267 FTRACE_ENABLE_MCOUNT = (1 << 3),
268 FTRACE_DISABLE_MCOUNT = (1 << 4),
5a45cfe1
SR
269 FTRACE_START_FUNC_RET = (1 << 5),
270 FTRACE_STOP_FUNC_RET = (1 << 6),
d61f82d0
SR
271};
272
5072c59f
SR
273static int ftrace_filtered;
274
08f5ac90 275static LIST_HEAD(ftrace_new_addrs);
3d083395 276
41c52c0d 277static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 278
3c1720f0
SR
279struct ftrace_page {
280 struct ftrace_page *next;
431aa3fb 281 int index;
3c1720f0 282 struct dyn_ftrace records[];
aa5e5cea 283};
3c1720f0
SR
284
285#define ENTRIES_PER_PAGE \
286 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
287
288/* estimate from running different kernels */
289#define NR_TO_INIT 10000
290
291static struct ftrace_page *ftrace_pages_start;
292static struct ftrace_page *ftrace_pages;
293
37ad5084
SR
294static struct dyn_ftrace *ftrace_free_records;
295
265c831c
SR
296/*
297 * This is a double for. Do not use 'break' to break out of the loop,
298 * you must use a goto.
299 */
300#define do_for_each_ftrace_rec(pg, rec) \
301 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
302 int _____i; \
303 for (_____i = 0; _____i < pg->index; _____i++) { \
304 rec = &pg->records[_____i];
305
306#define while_for_each_ftrace_rec() \
307 } \
308 }
ecea656d
AS
309
#ifdef CONFIG_KPROBES

/* Number of records currently frozen because a kprobe owns the site. */
static int frozen_record_count;

/* Mark a record as off-limits to code patching (kprobe at its site). */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

/* Allow code patching of the record again. */
static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

/* Nonzero when the record must not be patched. */
static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
339
e309b41d 340static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 341{
37ad5084
SR
342 rec->ip = (unsigned long)ftrace_free_records;
343 ftrace_free_records = rec;
344 rec->flags |= FTRACE_FL_FREE;
345}
346
fed1939c
SR
347void ftrace_release(void *start, unsigned long size)
348{
349 struct dyn_ftrace *rec;
350 struct ftrace_page *pg;
351 unsigned long s = (unsigned long)start;
352 unsigned long e = s + size;
fed1939c 353
00fd61ae 354 if (ftrace_disabled || !start)
fed1939c
SR
355 return;
356
52baf119 357 mutex_lock(&ftrace_lock);
265c831c 358 do_for_each_ftrace_rec(pg, rec) {
b00f0b6d
Z
359 if ((rec->ip >= s) && (rec->ip < e) &&
360 !(rec->flags & FTRACE_FL_FREE))
265c831c
SR
361 ftrace_free_rec(rec);
362 } while_for_each_ftrace_rec();
52baf119 363 mutex_unlock(&ftrace_lock);
fed1939c
SR
364}
365
e309b41d 366static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 367{
37ad5084
SR
368 struct dyn_ftrace *rec;
369
370 /* First check for freed records */
371 if (ftrace_free_records) {
372 rec = ftrace_free_records;
373
37ad5084 374 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 375 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
376 ftrace_free_records = NULL;
377 return NULL;
378 }
379
380 ftrace_free_records = (void *)rec->ip;
381 memset(rec, 0, sizeof(*rec));
382 return rec;
383 }
384
3c1720f0 385 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
386 if (!ftrace_pages->next) {
387 /* allocate another page */
388 ftrace_pages->next =
389 (void *)get_zeroed_page(GFP_KERNEL);
390 if (!ftrace_pages->next)
391 return NULL;
392 }
3c1720f0
SR
393 ftrace_pages = ftrace_pages->next;
394 }
395
396 return &ftrace_pages->records[ftrace_pages->index++];
397}
398
08f5ac90 399static struct dyn_ftrace *
d61f82d0 400ftrace_record_ip(unsigned long ip)
3d083395 401{
08f5ac90 402 struct dyn_ftrace *rec;
3d083395 403
f3c7ac40 404 if (ftrace_disabled)
08f5ac90 405 return NULL;
3d083395 406
08f5ac90
SR
407 rec = ftrace_alloc_dyn_node(ip);
408 if (!rec)
409 return NULL;
3d083395 410
08f5ac90 411 rec->ip = ip;
3d083395 412
08f5ac90 413 list_add(&rec->list, &ftrace_new_addrs);
3d083395 414
08f5ac90 415 return rec;
3d083395
SR
416}
417
b17e8a37
SR
418static void print_ip_ins(const char *fmt, unsigned char *p)
419{
420 int i;
421
422 printk(KERN_CONT "%s", fmt);
423
424 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
425 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
426}
427
31e88909 428static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
429{
430 switch (failed) {
431 case -EFAULT:
432 FTRACE_WARN_ON_ONCE(1);
433 pr_info("ftrace faulted on modifying ");
434 print_ip_sym(ip);
435 break;
436 case -EINVAL:
437 FTRACE_WARN_ON_ONCE(1);
438 pr_info("ftrace failed to modify ");
439 print_ip_sym(ip);
b17e8a37 440 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
441 printk(KERN_CONT "\n");
442 break;
443 case -EPERM:
444 FTRACE_WARN_ON_ONCE(1);
445 pr_info("ftrace faulted on writing ");
446 print_ip_sym(ip);
447 break;
448 default:
449 FTRACE_WARN_ON_ONCE(1);
450 pr_info("ftrace faulted on unknown error ");
451 print_ip_sym(ip);
452 }
453}
454
3c1720f0 455
0eb96701 456static int
31e88909 457__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 458{
e7d3737e 459 unsigned long ftrace_addr;
6a24a244 460 unsigned long ip, fl;
e7d3737e 461
f0001207 462 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f
SR
463
464 ip = rec->ip;
465
982c350b
SR
466 /*
467 * If this record is not to be traced and
468 * it is not enabled then do nothing.
469 *
470 * If this record is not to be traced and
57794a9d 471 * it is enabled then disable it.
982c350b
SR
472 *
473 */
474 if (rec->flags & FTRACE_FL_NOTRACE) {
475 if (rec->flags & FTRACE_FL_ENABLED)
476 rec->flags &= ~FTRACE_FL_ENABLED;
477 else
478 return 0;
479
480 } else if (ftrace_filtered && enable) {
5072c59f 481 /*
982c350b 482 * Filtering is on:
5072c59f 483 */
a4500b84 484
982c350b 485 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
5072c59f 486
982c350b
SR
487 /* Record is filtered and enabled, do nothing */
488 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
0eb96701 489 return 0;
5072c59f 490
57794a9d 491 /* Record is not filtered or enabled, do nothing */
982c350b
SR
492 if (!fl)
493 return 0;
494
495 /* Record is not filtered but enabled, disable it */
496 if (fl == FTRACE_FL_ENABLED)
5072c59f 497 rec->flags &= ~FTRACE_FL_ENABLED;
982c350b
SR
498 else
499 /* Otherwise record is filtered but not enabled, enable it */
5072c59f 500 rec->flags |= FTRACE_FL_ENABLED;
5072c59f 501 } else {
982c350b 502 /* Disable or not filtered */
5072c59f 503
41c52c0d 504 if (enable) {
982c350b 505 /* if record is enabled, do nothing */
5072c59f 506 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 507 return 0;
982c350b 508
5072c59f 509 rec->flags |= FTRACE_FL_ENABLED;
982c350b 510
5072c59f 511 } else {
982c350b 512
57794a9d 513 /* if record is not enabled, do nothing */
5072c59f 514 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 515 return 0;
982c350b 516
5072c59f
SR
517 rec->flags &= ~FTRACE_FL_ENABLED;
518 }
519 }
520
982c350b 521 if (rec->flags & FTRACE_FL_ENABLED)
e7d3737e 522 return ftrace_make_call(rec, ftrace_addr);
31e88909 523 else
e7d3737e 524 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
525}
526
e309b41d 527static void ftrace_replace_code(int enable)
3c1720f0 528{
3c1720f0
SR
529 struct dyn_ftrace *rec;
530 struct ftrace_page *pg;
6a24a244 531 int failed;
3c1720f0 532
265c831c
SR
533 do_for_each_ftrace_rec(pg, rec) {
534 /*
fa9d13cf
Z
535 * Skip over free records, records that have
536 * failed and not converted.
265c831c
SR
537 */
538 if (rec->flags & FTRACE_FL_FREE ||
fa9d13cf
Z
539 rec->flags & FTRACE_FL_FAILED ||
540 rec->flags & FTRACE_FL_CONVERTED)
265c831c
SR
541 continue;
542
543 /* ignore updates to this record's mcount site */
544 if (get_kprobe((void *)rec->ip)) {
545 freeze_record(rec);
546 continue;
547 } else {
548 unfreeze_record(rec);
549 }
f22f9a89 550
265c831c 551 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 552 if (failed) {
265c831c
SR
553 rec->flags |= FTRACE_FL_FAILED;
554 if ((system_state == SYSTEM_BOOTING) ||
555 !core_kernel_text(rec->ip)) {
556 ftrace_free_rec(rec);
4377245a 557 } else {
265c831c 558 ftrace_bug(failed, rec->ip);
4377245a
SR
559 /* Stop processing */
560 return;
561 }
3c1720f0 562 }
265c831c 563 } while_for_each_ftrace_rec();
3c1720f0
SR
564}
565
492a7ea5 566static int
31e88909 567ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
568{
569 unsigned long ip;
593eb8a2 570 int ret;
3c1720f0
SR
571
572 ip = rec->ip;
573
25aac9dc 574 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 575 if (ret) {
31e88909 576 ftrace_bug(ret, ip);
3c1720f0 577 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 578 return 0;
37ad5084 579 }
492a7ea5 580 return 1;
3c1720f0
SR
581}
582
000ab691
SR
583/*
584 * archs can override this function if they must do something
585 * before the modifying code is performed.
586 */
587int __weak ftrace_arch_code_modify_prepare(void)
588{
589 return 0;
590}
591
592/*
593 * archs can override this function if they must do something
594 * after the modifying code is performed.
595 */
596int __weak ftrace_arch_code_modify_post_process(void)
597{
598 return 0;
599}
600
e309b41d 601static int __ftrace_modify_code(void *data)
3d083395 602{
d61f82d0
SR
603 int *command = data;
604
a3583244 605 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 606 ftrace_replace_code(1);
a3583244 607 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
608 ftrace_replace_code(0);
609
610 if (*command & FTRACE_UPDATE_TRACE_FUNC)
611 ftrace_update_ftrace_func(ftrace_trace_function);
612
5a45cfe1
SR
613 if (*command & FTRACE_START_FUNC_RET)
614 ftrace_enable_ftrace_graph_caller();
615 else if (*command & FTRACE_STOP_FUNC_RET)
616 ftrace_disable_ftrace_graph_caller();
617
d61f82d0 618 return 0;
3d083395
SR
619}
620
e309b41d 621static void ftrace_run_update_code(int command)
3d083395 622{
000ab691
SR
623 int ret;
624
625 ret = ftrace_arch_code_modify_prepare();
626 FTRACE_WARN_ON(ret);
627 if (ret)
628 return;
629
784e2d76 630 stop_machine(__ftrace_modify_code, &command, NULL);
000ab691
SR
631
632 ret = ftrace_arch_code_modify_post_process();
633 FTRACE_WARN_ON(ret);
3d083395
SR
634}
635
d61f82d0 636static ftrace_func_t saved_ftrace_func;
60a7ecf4 637static int ftrace_start_up;
df4fc315
SR
638
639static void ftrace_startup_enable(int command)
640{
641 if (saved_ftrace_func != ftrace_trace_function) {
642 saved_ftrace_func = ftrace_trace_function;
643 command |= FTRACE_UPDATE_TRACE_FUNC;
644 }
645
646 if (!command || !ftrace_enabled)
647 return;
648
649 ftrace_run_update_code(command);
650}
d61f82d0 651
5a45cfe1 652static void ftrace_startup(int command)
3d083395 653{
4eebcc81
SR
654 if (unlikely(ftrace_disabled))
655 return;
656
60a7ecf4 657 ftrace_start_up++;
982c350b 658 command |= FTRACE_ENABLE_CALLS;
d61f82d0 659
df4fc315 660 ftrace_startup_enable(command);
3d083395
SR
661}
662
5a45cfe1 663static void ftrace_shutdown(int command)
3d083395 664{
4eebcc81
SR
665 if (unlikely(ftrace_disabled))
666 return;
667
60a7ecf4
SR
668 ftrace_start_up--;
669 if (!ftrace_start_up)
d61f82d0 670 command |= FTRACE_DISABLE_CALLS;
3d083395 671
d61f82d0
SR
672 if (saved_ftrace_func != ftrace_trace_function) {
673 saved_ftrace_func = ftrace_trace_function;
674 command |= FTRACE_UPDATE_TRACE_FUNC;
675 }
3d083395 676
d61f82d0 677 if (!command || !ftrace_enabled)
e6ea44e9 678 return;
d61f82d0
SR
679
680 ftrace_run_update_code(command);
3d083395
SR
681}
682
e309b41d 683static void ftrace_startup_sysctl(void)
b0fc494f 684{
d61f82d0
SR
685 int command = FTRACE_ENABLE_MCOUNT;
686
4eebcc81
SR
687 if (unlikely(ftrace_disabled))
688 return;
689
d61f82d0
SR
690 /* Force update next time */
691 saved_ftrace_func = NULL;
60a7ecf4
SR
692 /* ftrace_start_up is true if we want ftrace running */
693 if (ftrace_start_up)
d61f82d0
SR
694 command |= FTRACE_ENABLE_CALLS;
695
696 ftrace_run_update_code(command);
b0fc494f
SR
697}
698
e309b41d 699static void ftrace_shutdown_sysctl(void)
b0fc494f 700{
d61f82d0
SR
701 int command = FTRACE_DISABLE_MCOUNT;
702
4eebcc81
SR
703 if (unlikely(ftrace_disabled))
704 return;
705
60a7ecf4
SR
706 /* ftrace_start_up is true if ftrace is running */
707 if (ftrace_start_up)
d61f82d0
SR
708 command |= FTRACE_DISABLE_CALLS;
709
710 ftrace_run_update_code(command);
b0fc494f
SR
711}
712
3d083395
SR
713static cycle_t ftrace_update_time;
714static unsigned long ftrace_update_cnt;
715unsigned long ftrace_update_tot_cnt;
716
31e88909 717static int ftrace_update_code(struct module *mod)
3d083395 718{
08f5ac90 719 struct dyn_ftrace *p, *t;
f22f9a89 720 cycle_t start, stop;
3d083395 721
750ed1a4 722 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
723 ftrace_update_cnt = 0;
724
08f5ac90 725 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
3d083395 726
08f5ac90
SR
727 /* If something went wrong, bail without enabling anything */
728 if (unlikely(ftrace_disabled))
729 return -1;
f22f9a89 730
08f5ac90 731 list_del_init(&p->list);
f22f9a89 732
08f5ac90 733 /* convert record (i.e, patch mcount-call with NOP) */
31e88909 734 if (ftrace_code_disable(mod, p)) {
08f5ac90
SR
735 p->flags |= FTRACE_FL_CONVERTED;
736 ftrace_update_cnt++;
737 } else
738 ftrace_free_rec(p);
3d083395
SR
739 }
740
750ed1a4 741 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
742 ftrace_update_time = stop - start;
743 ftrace_update_tot_cnt += ftrace_update_cnt;
744
16444a8a
ACM
745 return 0;
746}
747
68bf21aa 748static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
749{
750 struct ftrace_page *pg;
751 int cnt;
752 int i;
3c1720f0
SR
753
754 /* allocate a few pages */
755 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
756 if (!ftrace_pages_start)
757 return -1;
758
759 /*
760 * Allocate a few more pages.
761 *
762 * TODO: have some parser search vmlinux before
763 * final linking to find all calls to ftrace.
764 * Then we can:
765 * a) know how many pages to allocate.
766 * and/or
767 * b) set up the table then.
768 *
769 * The dynamic code is still necessary for
770 * modules.
771 */
772
773 pg = ftrace_pages = ftrace_pages_start;
774
68bf21aa 775 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 776 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 777 num_to_init, cnt + 1);
3c1720f0
SR
778
779 for (i = 0; i < cnt; i++) {
780 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
781
782 /* If we fail, we'll try later anyway */
783 if (!pg->next)
784 break;
785
786 pg = pg->next;
787 }
788
789 return 0;
790}
791
5072c59f
SR
792enum {
793 FTRACE_ITER_FILTER = (1 << 0),
794 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 795 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 796 FTRACE_ITER_FAILURES = (1 << 3),
0c75a3ed 797 FTRACE_ITER_PRINTALL = (1 << 4),
8fc0c701 798 FTRACE_ITER_HASH = (1 << 5),
5072c59f
SR
799};
800
801#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
802
803struct ftrace_iterator {
5072c59f 804 struct ftrace_page *pg;
8fc0c701 805 int hidx;
431aa3fb 806 int idx;
5072c59f
SR
807 unsigned flags;
808 unsigned char buffer[FTRACE_BUFF_MAX+1];
809 unsigned buffer_idx;
810 unsigned filtered;
811};
812
8fc0c701
SR
813static void *
814t_hash_next(struct seq_file *m, void *v, loff_t *pos)
815{
816 struct ftrace_iterator *iter = m->private;
817 struct hlist_node *hnd = v;
818 struct hlist_head *hhd;
819
820 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
821
822 (*pos)++;
823
824 retry:
825 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
826 return NULL;
827
828 hhd = &ftrace_func_hash[iter->hidx];
829
830 if (hlist_empty(hhd)) {
831 iter->hidx++;
832 hnd = NULL;
833 goto retry;
834 }
835
836 if (!hnd)
837 hnd = hhd->first;
838 else {
839 hnd = hnd->next;
840 if (!hnd) {
841 iter->hidx++;
842 goto retry;
843 }
844 }
845
846 return hnd;
847}
848
849static void *t_hash_start(struct seq_file *m, loff_t *pos)
850{
851 struct ftrace_iterator *iter = m->private;
852 void *p = NULL;
853
854 iter->flags |= FTRACE_ITER_HASH;
855
856 return t_hash_next(m, p, pos);
857}
858
859static int t_hash_show(struct seq_file *m, void *v)
860{
b6887d79 861 struct ftrace_func_probe *rec;
8fc0c701
SR
862 struct hlist_node *hnd = v;
863 char str[KSYM_SYMBOL_LEN];
864
b6887d79 865 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
8fc0c701 866
809dcf29
SR
867 if (rec->ops->print)
868 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
869
8fc0c701
SR
870 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
871 seq_printf(m, "%s:", str);
872
873 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
874 seq_printf(m, "%s", str);
875
876 if (rec->data)
877 seq_printf(m, ":%p", rec->data);
878 seq_putc(m, '\n');
879
880 return 0;
881}
882
e309b41d 883static void *
5072c59f
SR
884t_next(struct seq_file *m, void *v, loff_t *pos)
885{
886 struct ftrace_iterator *iter = m->private;
887 struct dyn_ftrace *rec = NULL;
888
8fc0c701
SR
889 if (iter->flags & FTRACE_ITER_HASH)
890 return t_hash_next(m, v, pos);
891
5072c59f
SR
892 (*pos)++;
893
0c75a3ed
SR
894 if (iter->flags & FTRACE_ITER_PRINTALL)
895 return NULL;
896
5072c59f
SR
897 retry:
898 if (iter->idx >= iter->pg->index) {
899 if (iter->pg->next) {
900 iter->pg = iter->pg->next;
901 iter->idx = 0;
902 goto retry;
50cdaf08
LW
903 } else {
904 iter->idx = -1;
5072c59f
SR
905 }
906 } else {
907 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
908 if ((rec->flags & FTRACE_FL_FREE) ||
909
910 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
911 (rec->flags & FTRACE_FL_FAILED)) ||
912
913 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 914 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 915
0183fb1c
SR
916 ((iter->flags & FTRACE_ITER_FILTER) &&
917 !(rec->flags & FTRACE_FL_FILTER)) ||
918
41c52c0d
SR
919 ((iter->flags & FTRACE_ITER_NOTRACE) &&
920 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
921 rec = NULL;
922 goto retry;
923 }
924 }
925
5072c59f
SR
926 return rec;
927}
928
929static void *t_start(struct seq_file *m, loff_t *pos)
930{
931 struct ftrace_iterator *iter = m->private;
932 void *p = NULL;
5072c59f 933
8fc0c701 934 mutex_lock(&ftrace_lock);
0c75a3ed
SR
935 /*
936 * For set_ftrace_filter reading, if we have the filter
937 * off, we can short cut and just print out that all
938 * functions are enabled.
939 */
940 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
941 if (*pos > 0)
8fc0c701 942 return t_hash_start(m, pos);
0c75a3ed
SR
943 iter->flags |= FTRACE_ITER_PRINTALL;
944 (*pos)++;
945 return iter;
946 }
947
8fc0c701
SR
948 if (iter->flags & FTRACE_ITER_HASH)
949 return t_hash_start(m, pos);
950
50cdaf08
LW
951 if (*pos > 0) {
952 if (iter->idx < 0)
953 return p;
954 (*pos)--;
955 iter->idx--;
956 }
5821e1b7 957
50cdaf08 958 p = t_next(m, p, pos);
5072c59f 959
8fc0c701
SR
960 if (!p)
961 return t_hash_start(m, pos);
962
5072c59f
SR
963 return p;
964}
965
966static void t_stop(struct seq_file *m, void *p)
967{
8fc0c701 968 mutex_unlock(&ftrace_lock);
5072c59f
SR
969}
970
971static int t_show(struct seq_file *m, void *v)
972{
0c75a3ed 973 struct ftrace_iterator *iter = m->private;
5072c59f
SR
974 struct dyn_ftrace *rec = v;
975 char str[KSYM_SYMBOL_LEN];
976
8fc0c701
SR
977 if (iter->flags & FTRACE_ITER_HASH)
978 return t_hash_show(m, v);
979
0c75a3ed
SR
980 if (iter->flags & FTRACE_ITER_PRINTALL) {
981 seq_printf(m, "#### all functions enabled ####\n");
982 return 0;
983 }
984
5072c59f
SR
985 if (!rec)
986 return 0;
987
988 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
989
50cdaf08 990 seq_printf(m, "%s\n", str);
5072c59f
SR
991
992 return 0;
993}
994
995static struct seq_operations show_ftrace_seq_ops = {
996 .start = t_start,
997 .next = t_next,
998 .stop = t_stop,
999 .show = t_show,
1000};
1001
e309b41d 1002static int
5072c59f
SR
1003ftrace_avail_open(struct inode *inode, struct file *file)
1004{
1005 struct ftrace_iterator *iter;
1006 int ret;
1007
4eebcc81
SR
1008 if (unlikely(ftrace_disabled))
1009 return -ENODEV;
1010
5072c59f
SR
1011 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1012 if (!iter)
1013 return -ENOMEM;
1014
1015 iter->pg = ftrace_pages_start;
5072c59f
SR
1016
1017 ret = seq_open(file, &show_ftrace_seq_ops);
1018 if (!ret) {
1019 struct seq_file *m = file->private_data;
4bf39a94 1020
5072c59f 1021 m->private = iter;
4bf39a94 1022 } else {
5072c59f 1023 kfree(iter);
4bf39a94 1024 }
5072c59f
SR
1025
1026 return ret;
1027}
1028
1029int ftrace_avail_release(struct inode *inode, struct file *file)
1030{
1031 struct seq_file *m = (struct seq_file *)file->private_data;
1032 struct ftrace_iterator *iter = m->private;
1033
1034 seq_release(inode, file);
1035 kfree(iter);
4bf39a94 1036
5072c59f
SR
1037 return 0;
1038}
1039
eb9a7bf0
AS
1040static int
1041ftrace_failures_open(struct inode *inode, struct file *file)
1042{
1043 int ret;
1044 struct seq_file *m;
1045 struct ftrace_iterator *iter;
1046
1047 ret = ftrace_avail_open(inode, file);
1048 if (!ret) {
1049 m = (struct seq_file *)file->private_data;
1050 iter = (struct ftrace_iterator *)m->private;
1051 iter->flags = FTRACE_ITER_FAILURES;
1052 }
1053
1054 return ret;
1055}
1056
1057
41c52c0d 1058static void ftrace_filter_reset(int enable)
5072c59f
SR
1059{
1060 struct ftrace_page *pg;
1061 struct dyn_ftrace *rec;
41c52c0d 1062 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f 1063
52baf119 1064 mutex_lock(&ftrace_lock);
41c52c0d
SR
1065 if (enable)
1066 ftrace_filtered = 0;
265c831c
SR
1067 do_for_each_ftrace_rec(pg, rec) {
1068 if (rec->flags & FTRACE_FL_FAILED)
1069 continue;
1070 rec->flags &= ~type;
1071 } while_for_each_ftrace_rec();
52baf119 1072 mutex_unlock(&ftrace_lock);
5072c59f
SR
1073}
1074
e309b41d 1075static int
41c52c0d 1076ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1077{
1078 struct ftrace_iterator *iter;
1079 int ret = 0;
1080
4eebcc81
SR
1081 if (unlikely(ftrace_disabled))
1082 return -ENODEV;
1083
5072c59f
SR
1084 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1085 if (!iter)
1086 return -ENOMEM;
1087
41c52c0d 1088 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1089 if ((file->f_mode & FMODE_WRITE) &&
1090 !(file->f_flags & O_APPEND))
41c52c0d 1091 ftrace_filter_reset(enable);
5072c59f
SR
1092
1093 if (file->f_mode & FMODE_READ) {
1094 iter->pg = ftrace_pages_start;
41c52c0d
SR
1095 iter->flags = enable ? FTRACE_ITER_FILTER :
1096 FTRACE_ITER_NOTRACE;
5072c59f
SR
1097
1098 ret = seq_open(file, &show_ftrace_seq_ops);
1099 if (!ret) {
1100 struct seq_file *m = file->private_data;
1101 m->private = iter;
1102 } else
1103 kfree(iter);
1104 } else
1105 file->private_data = iter;
41c52c0d 1106 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1107
1108 return ret;
1109}
1110
41c52c0d
SR
/* set_ftrace_filter open: regex open in filter mode. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
1116
/* set_ftrace_notrace open: regex open in notrace mode. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1122
e309b41d 1123static ssize_t
41c52c0d 1124ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
1125 size_t cnt, loff_t *ppos)
1126{
1127 if (file->f_mode & FMODE_READ)
1128 return seq_read(file, ubuf, cnt, ppos);
1129 else
1130 return -EPERM;
1131}
1132
e309b41d 1133static loff_t
41c52c0d 1134ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1135{
1136 loff_t ret;
1137
1138 if (file->f_mode & FMODE_READ)
1139 ret = seq_lseek(file, offset, origin);
1140 else
1141 file->f_pos = ret = 1;
1142
1143 return ret;
1144}
1145
1146enum {
1147 MATCH_FULL,
1148 MATCH_FRONT_ONLY,
1149 MATCH_MIDDLE_ONLY,
1150 MATCH_END_ONLY,
1151};
1152
9f4801e3
SR
1153/*
1154 * (static function - no need for kernel doc)
1155 *
1156 * Pass in a buffer containing a glob and this function will
1157 * set search to point to the search part of the buffer and
1158 * return the type of search it is (see enum above).
1159 * This does modify buff.
1160 *
1161 * Returns enum type.
1162 * search returns the pointer to use for comparison.
1163 * not returns 1 if buff started with a '!'
1164 * 0 otherwise.
1165 */
1166static int
64e7c440 1167ftrace_setup_glob(char *buff, int len, char **search, int *not)
5072c59f 1168{
5072c59f 1169 int type = MATCH_FULL;
9f4801e3 1170 int i;
ea3a6d6d
SR
1171
1172 if (buff[0] == '!') {
9f4801e3 1173 *not = 1;
ea3a6d6d
SR
1174 buff++;
1175 len--;
9f4801e3
SR
1176 } else
1177 *not = 0;
1178
1179 *search = buff;
5072c59f
SR
1180
1181 for (i = 0; i < len; i++) {
1182 if (buff[i] == '*') {
1183 if (!i) {
9f4801e3 1184 *search = buff + 1;
5072c59f 1185 type = MATCH_END_ONLY;
5072c59f 1186 } else {
9f4801e3 1187 if (type == MATCH_END_ONLY)
5072c59f 1188 type = MATCH_MIDDLE_ONLY;
9f4801e3 1189 else
5072c59f 1190 type = MATCH_FRONT_ONLY;
5072c59f
SR
1191 buff[i] = 0;
1192 break;
1193 }
1194 }
1195 }
1196
9f4801e3
SR
1197 return type;
1198}
1199
64e7c440 1200static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 1201{
9f4801e3
SR
1202 int matched = 0;
1203 char *ptr;
1204
9f4801e3
SR
1205 switch (type) {
1206 case MATCH_FULL:
1207 if (strcmp(str, regex) == 0)
1208 matched = 1;
1209 break;
1210 case MATCH_FRONT_ONLY:
1211 if (strncmp(str, regex, len) == 0)
1212 matched = 1;
1213 break;
1214 case MATCH_MIDDLE_ONLY:
1215 if (strstr(str, regex))
1216 matched = 1;
1217 break;
1218 case MATCH_END_ONLY:
1219 ptr = strstr(str, regex);
1220 if (ptr && (ptr[len] == 0))
1221 matched = 1;
1222 break;
1223 }
1224
1225 return matched;
1226}
1227
64e7c440
SR
1228static int
1229ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1230{
1231 char str[KSYM_SYMBOL_LEN];
1232
1233 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1234 return ftrace_match(str, regex, len, type);
1235}
1236
9f4801e3
SR
1237static void ftrace_match_records(char *buff, int len, int enable)
1238{
6a24a244 1239 unsigned int search_len;
9f4801e3
SR
1240 struct ftrace_page *pg;
1241 struct dyn_ftrace *rec;
6a24a244
SR
1242 unsigned long flag;
1243 char *search;
9f4801e3 1244 int type;
9f4801e3
SR
1245 int not;
1246
6a24a244 1247 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
9f4801e3
SR
1248 type = ftrace_setup_glob(buff, len, &search, &not);
1249
1250 search_len = strlen(search);
1251
52baf119 1252 mutex_lock(&ftrace_lock);
265c831c 1253 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1254
1255 if (rec->flags & FTRACE_FL_FAILED)
1256 continue;
9f4801e3
SR
1257
1258 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1259 if (not)
1260 rec->flags &= ~flag;
1261 else
1262 rec->flags |= flag;
1263 }
e68746a2
SR
1264 /*
1265 * Only enable filtering if we have a function that
1266 * is filtered on.
1267 */
1268 if (enable && (rec->flags & FTRACE_FL_FILTER))
1269 ftrace_filtered = 1;
265c831c 1270 } while_for_each_ftrace_rec();
52baf119 1271 mutex_unlock(&ftrace_lock);
5072c59f
SR
1272}
1273
64e7c440
SR
1274static int
1275ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1276 char *regex, int len, int type)
1277{
1278 char str[KSYM_SYMBOL_LEN];
1279 char *modname;
1280
1281 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1282
1283 if (!modname || strcmp(modname, mod))
1284 return 0;
1285
1286 /* blank search means to match all funcs in the mod */
1287 if (len)
1288 return ftrace_match(str, regex, len, type);
1289 else
1290 return 1;
1291}
1292
1293static void ftrace_match_module_records(char *buff, char *mod, int enable)
1294{
6a24a244 1295 unsigned search_len = 0;
64e7c440
SR
1296 struct ftrace_page *pg;
1297 struct dyn_ftrace *rec;
1298 int type = MATCH_FULL;
6a24a244
SR
1299 char *search = buff;
1300 unsigned long flag;
64e7c440
SR
1301 int not = 0;
1302
6a24a244
SR
1303 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1304
64e7c440
SR
1305 /* blank or '*' mean the same */
1306 if (strcmp(buff, "*") == 0)
1307 buff[0] = 0;
1308
1309 /* handle the case of 'dont filter this module' */
1310 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1311 buff[0] = 0;
1312 not = 1;
1313 }
1314
1315 if (strlen(buff)) {
1316 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1317 search_len = strlen(search);
1318 }
1319
52baf119 1320 mutex_lock(&ftrace_lock);
64e7c440
SR
1321 do_for_each_ftrace_rec(pg, rec) {
1322
1323 if (rec->flags & FTRACE_FL_FAILED)
1324 continue;
1325
1326 if (ftrace_match_module_record(rec, mod,
1327 search, search_len, type)) {
1328 if (not)
1329 rec->flags &= ~flag;
1330 else
1331 rec->flags |= flag;
1332 }
e68746a2
SR
1333 if (enable && (rec->flags & FTRACE_FL_FILTER))
1334 ftrace_filtered = 1;
64e7c440
SR
1335
1336 } while_for_each_ftrace_rec();
52baf119 1337 mutex_unlock(&ftrace_lock);
64e7c440
SR
1338}
1339
f6180773
SR
1340/*
1341 * We register the module command as a template to show others how
1342 * to register the a command as well.
1343 */
1344
1345static int
1346ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1347{
1348 char *mod;
1349
1350 /*
1351 * cmd == 'mod' because we only registered this func
1352 * for the 'mod' ftrace_func_command.
1353 * But if you register one func with multiple commands,
1354 * you can tell which command was used by the cmd
1355 * parameter.
1356 */
1357
1358 /* we must have a module name */
1359 if (!param)
1360 return -EINVAL;
1361
1362 mod = strsep(&param, ":");
1363 if (!strlen(mod))
1364 return -EINVAL;
1365
1366 ftrace_match_module_records(func, mod, enable);
1367 return 0;
1368}
1369
1370static struct ftrace_func_command ftrace_mod_cmd = {
1371 .name = "mod",
1372 .func = ftrace_mod_callback,
1373};
1374
1375static int __init ftrace_mod_cmd_init(void)
1376{
1377 return register_ftrace_command(&ftrace_mod_cmd);
1378}
1379device_initcall(ftrace_mod_cmd_init);
1380
/*
 * ftrace callback for the function-probe mechanism: on every traced
 * function entry, look the ip up in ftrace_func_hash and invoke the
 * handler of each probe registered for that address.
 */
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func = function_trace_probe_call,
};

/* non-zero while trace_probe_ops is registered with ftrace */
static int ftrace_probe_registered;
59df055f 1416
b6887d79 1417static void __enable_ftrace_function_probe(void)
59df055f
SR
1418{
1419 int i;
1420
b6887d79 1421 if (ftrace_probe_registered)
59df055f
SR
1422 return;
1423
1424 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1425 struct hlist_head *hhd = &ftrace_func_hash[i];
1426 if (hhd->first)
1427 break;
1428 }
1429 /* Nothing registered? */
1430 if (i == FTRACE_FUNC_HASHSIZE)
1431 return;
1432
b6887d79 1433 __register_ftrace_function(&trace_probe_ops);
59df055f 1434 ftrace_startup(0);
b6887d79 1435 ftrace_probe_registered = 1;
59df055f
SR
1436}
1437
b6887d79 1438static void __disable_ftrace_function_probe(void)
59df055f
SR
1439{
1440 int i;
1441
b6887d79 1442 if (!ftrace_probe_registered)
59df055f
SR
1443 return;
1444
1445 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1446 struct hlist_head *hhd = &ftrace_func_hash[i];
1447 if (hhd->first)
1448 return;
1449 }
1450
1451 /* no more funcs left */
b6887d79 1452 __unregister_ftrace_function(&trace_probe_ops);
59df055f 1453 ftrace_shutdown(0);
b6887d79 1454 ftrace_probe_registered = 0;
59df055f
SR
1455}
1456
1457
1458static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1459{
b6887d79
SR
1460 struct ftrace_func_probe *entry =
1461 container_of(rhp, struct ftrace_func_probe, rcu);
59df055f
SR
1462
1463 if (entry->ops->free)
1464 entry->ops->free(&entry->data);
1465 kfree(entry);
1466}
1467
1468
/**
 * register_ftrace_function_probe - attach a probe to functions matching glob
 * @glob: glob selecting the functions ('!' negation is not supported)
 * @ops: probe handler plus optional per-function callback/free hooks
 * @data: opaque cookie handed to the handler
 *
 * Returns the number of functions armed, or a negative errno.
 * Takes ftrace_lock itself; must be called without it held.
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
1536
/* match criteria for __unregister_ftrace_function_probe() */
enum {
	PROBE_TEST_FUNC		= 1,	/* entry->ops must match */
	PROBE_TEST_DATA		= 2	/* entry->data must match */
};
1541
1542static void
b6887d79 1543__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
1544 void *data, int flags)
1545{
b6887d79 1546 struct ftrace_func_probe *entry;
59df055f
SR
1547 struct hlist_node *n, *tmp;
1548 char str[KSYM_SYMBOL_LEN];
1549 int type = MATCH_FULL;
1550 int i, len = 0;
1551 char *search;
1552
1553 if (glob && (strcmp(glob, "*") || !strlen(glob)))
1554 glob = NULL;
1555 else {
1556 int not;
1557
1558 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1559 len = strlen(search);
1560
b6887d79 1561 /* we do not support '!' for function probes */
59df055f
SR
1562 if (WARN_ON(not))
1563 return;
1564 }
1565
1566 mutex_lock(&ftrace_lock);
1567 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1568 struct hlist_head *hhd = &ftrace_func_hash[i];
1569
1570 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1571
1572 /* break up if statements for readability */
b6887d79 1573 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
1574 continue;
1575
b6887d79 1576 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
1577 continue;
1578
1579 /* do this last, since it is the most expensive */
1580 if (glob) {
1581 kallsyms_lookup(entry->ip, NULL, NULL,
1582 NULL, str);
1583 if (!ftrace_match(str, glob, len, type))
1584 continue;
1585 }
1586
1587 hlist_del(&entry->node);
1588 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1589 }
1590 }
b6887d79 1591 __disable_ftrace_function_probe();
59df055f
SR
1592 mutex_unlock(&ftrace_lock);
1593}
1594
1595void
b6887d79 1596unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
1597 void *data)
1598{
b6887d79
SR
1599 __unregister_ftrace_function_probe(glob, ops, data,
1600 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
1601}
1602
1603void
b6887d79 1604unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 1605{
b6887d79 1606 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
1607}
1608
b6887d79 1609void unregister_ftrace_function_probe_all(char *glob)
59df055f 1610{
b6887d79 1611 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
1612}
1613
f6180773
SR
1614static LIST_HEAD(ftrace_commands);
1615static DEFINE_MUTEX(ftrace_cmd_mutex);
1616
1617int register_ftrace_command(struct ftrace_func_command *cmd)
1618{
1619 struct ftrace_func_command *p;
1620 int ret = 0;
1621
1622 mutex_lock(&ftrace_cmd_mutex);
1623 list_for_each_entry(p, &ftrace_commands, list) {
1624 if (strcmp(cmd->name, p->name) == 0) {
1625 ret = -EBUSY;
1626 goto out_unlock;
1627 }
1628 }
1629 list_add(&cmd->list, &ftrace_commands);
1630 out_unlock:
1631 mutex_unlock(&ftrace_cmd_mutex);
1632
1633 return ret;
1634}
1635
1636int unregister_ftrace_command(struct ftrace_func_command *cmd)
1637{
1638 struct ftrace_func_command *p, *n;
1639 int ret = -ENODEV;
1640
1641 mutex_lock(&ftrace_cmd_mutex);
1642 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1643 if (strcmp(cmd->name, p->name) == 0) {
1644 ret = 0;
1645 list_del_init(&p->list);
1646 goto out_unlock;
1647 }
1648 }
1649 out_unlock:
1650 mutex_unlock(&ftrace_cmd_mutex);
1651
1652 return ret;
1653}
1654
64e7c440
SR
1655static int ftrace_process_regex(char *buff, int len, int enable)
1656{
f6180773 1657 char *func, *command, *next = buff;
6a24a244 1658 struct ftrace_func_command *p;
f6180773 1659 int ret = -EINVAL;
64e7c440
SR
1660
1661 func = strsep(&next, ":");
1662
1663 if (!next) {
1664 ftrace_match_records(func, len, enable);
1665 return 0;
1666 }
1667
f6180773 1668 /* command found */
64e7c440
SR
1669
1670 command = strsep(&next, ":");
1671
f6180773
SR
1672 mutex_lock(&ftrace_cmd_mutex);
1673 list_for_each_entry(p, &ftrace_commands, list) {
1674 if (strcmp(p->name, command) == 0) {
1675 ret = p->func(func, command, next, enable);
1676 goto out_unlock;
1677 }
64e7c440 1678 }
f6180773
SR
1679 out_unlock:
1680 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 1681
f6180773 1682 return ret;
64e7c440
SR
1683}
1684
e309b41d 1685static ssize_t
41c52c0d
SR
1686ftrace_regex_write(struct file *file, const char __user *ubuf,
1687 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1688{
1689 struct ftrace_iterator *iter;
1690 char ch;
1691 size_t read = 0;
1692 ssize_t ret;
1693
1694 if (!cnt || cnt < 0)
1695 return 0;
1696
41c52c0d 1697 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1698
1699 if (file->f_mode & FMODE_READ) {
1700 struct seq_file *m = file->private_data;
1701 iter = m->private;
1702 } else
1703 iter = file->private_data;
1704
1705 if (!*ppos) {
1706 iter->flags &= ~FTRACE_ITER_CONT;
1707 iter->buffer_idx = 0;
1708 }
1709
1710 ret = get_user(ch, ubuf++);
1711 if (ret)
1712 goto out;
1713 read++;
1714 cnt--;
1715
1716 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1717 /* skip white space */
1718 while (cnt && isspace(ch)) {
1719 ret = get_user(ch, ubuf++);
1720 if (ret)
1721 goto out;
1722 read++;
1723 cnt--;
1724 }
1725
5072c59f
SR
1726 if (isspace(ch)) {
1727 file->f_pos += read;
1728 ret = read;
1729 goto out;
1730 }
1731
1732 iter->buffer_idx = 0;
1733 }
1734
1735 while (cnt && !isspace(ch)) {
1736 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1737 iter->buffer[iter->buffer_idx++] = ch;
1738 else {
1739 ret = -EINVAL;
1740 goto out;
1741 }
1742 ret = get_user(ch, ubuf++);
1743 if (ret)
1744 goto out;
1745 read++;
1746 cnt--;
1747 }
1748
1749 if (isspace(ch)) {
1750 iter->filtered++;
1751 iter->buffer[iter->buffer_idx] = 0;
64e7c440
SR
1752 ret = ftrace_process_regex(iter->buffer,
1753 iter->buffer_idx, enable);
1754 if (ret)
1755 goto out;
5072c59f
SR
1756 iter->buffer_idx = 0;
1757 } else
1758 iter->flags |= FTRACE_ITER_CONT;
1759
1760
1761 file->f_pos += read;
1762
1763 ret = read;
1764 out:
41c52c0d 1765 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1766
1767 return ret;
1768}
1769
41c52c0d
SR
1770static ssize_t
1771ftrace_filter_write(struct file *file, const char __user *ubuf,
1772 size_t cnt, loff_t *ppos)
1773{
1774 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1775}
1776
1777static ssize_t
1778ftrace_notrace_write(struct file *file, const char __user *ubuf,
1779 size_t cnt, loff_t *ppos)
1780{
1781 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1782}
1783
1784static void
1785ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1786{
1787 if (unlikely(ftrace_disabled))
1788 return;
1789
1790 mutex_lock(&ftrace_regex_lock);
1791 if (reset)
1792 ftrace_filter_reset(enable);
1793 if (buf)
7f24b31b 1794 ftrace_match_records(buf, len, enable);
41c52c0d
SR
1795 mutex_unlock(&ftrace_regex_lock);
1796}
1797
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is
 * enabled.  If @buf is NULL and @reset is set, all functions will be
 * enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when
 * tracing is enabled.  If @buf is NULL and @reset is set, all functions
 * will be enabled for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1826
e309b41d 1827static int
41c52c0d 1828ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1829{
1830 struct seq_file *m = (struct seq_file *)file->private_data;
1831 struct ftrace_iterator *iter;
1832
41c52c0d 1833 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1834 if (file->f_mode & FMODE_READ) {
1835 iter = m->private;
1836
1837 seq_release(inode, file);
1838 } else
1839 iter = file->private_data;
1840
1841 if (iter->buffer_idx) {
1842 iter->filtered++;
1843 iter->buffer[iter->buffer_idx] = 0;
7f24b31b 1844 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1845 }
1846
e6ea44e9 1847 mutex_lock(&ftrace_lock);
ee02a2e5 1848 if (ftrace_start_up && ftrace_enabled)
5072c59f 1849 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
e6ea44e9 1850 mutex_unlock(&ftrace_lock);
5072c59f
SR
1851
1852 kfree(iter);
41c52c0d 1853 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1854 return 0;
1855}
1856
41c52c0d
SR
1857static int
1858ftrace_filter_release(struct inode *inode, struct file *file)
1859{
1860 return ftrace_regex_release(inode, file, 1);
1861}
1862
1863static int
1864ftrace_notrace_release(struct inode *inode, struct file *file)
1865{
1866 return ftrace_regex_release(inode, file, 0);
1867}
1868
/* available_filter_functions: read-only list of traceable functions */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

/* failures: records that could not be patched to nops */
static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

/* set_ftrace_filter: glob expressions selecting functions to trace */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

/* set_ftrace_notrace: glob expressions selecting functions to skip */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
1898
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* protects ftrace_graph_count and ftrace_graph_funcs[] */
static DEFINE_MUTEX(graph_lock);

/* number of valid entries in ftrace_graph_funcs[] */
int ftrace_graph_count;
/* addresses of the functions selected via set_graph_function */
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1905
1906static void *
1907g_next(struct seq_file *m, void *v, loff_t *pos)
1908{
1909 unsigned long *array = m->private;
1910 int index = *pos;
1911
1912 (*pos)++;
1913
1914 if (index >= ftrace_graph_count)
1915 return NULL;
1916
1917 return &array[index];
1918}
1919
1920static void *g_start(struct seq_file *m, loff_t *pos)
1921{
1922 void *p = NULL;
1923
1924 mutex_lock(&graph_lock);
1925
f9349a8f
FW
1926 /* Nothing, tell g_show to print all functions are enabled */
1927 if (!ftrace_graph_count && !*pos)
1928 return (void *)1;
1929
ea4e2bc4
SR
1930 p = g_next(m, p, pos);
1931
1932 return p;
1933}
1934
1935static void g_stop(struct seq_file *m, void *p)
1936{
1937 mutex_unlock(&graph_lock);
1938}
1939
1940static int g_show(struct seq_file *m, void *v)
1941{
1942 unsigned long *ptr = v;
1943 char str[KSYM_SYMBOL_LEN];
1944
1945 if (!ptr)
1946 return 0;
1947
f9349a8f
FW
1948 if (ptr == (unsigned long *)1) {
1949 seq_printf(m, "#### all functions enabled ####\n");
1950 return 0;
1951 }
1952
ea4e2bc4
SR
1953 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1954
1955 seq_printf(m, "%s\n", str);
1956
1957 return 0;
1958}
1959
1960static struct seq_operations ftrace_graph_seq_ops = {
1961 .start = g_start,
1962 .next = g_next,
1963 .stop = g_stop,
1964 .show = g_show,
1965};
1966
1967static int
1968ftrace_graph_open(struct inode *inode, struct file *file)
1969{
1970 int ret = 0;
1971
1972 if (unlikely(ftrace_disabled))
1973 return -ENODEV;
1974
1975 mutex_lock(&graph_lock);
1976 if ((file->f_mode & FMODE_WRITE) &&
1977 !(file->f_flags & O_APPEND)) {
1978 ftrace_graph_count = 0;
1979 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1980 }
1981
1982 if (file->f_mode & FMODE_READ) {
1983 ret = seq_open(file, &ftrace_graph_seq_ops);
1984 if (!ret) {
1985 struct seq_file *m = file->private_data;
1986 m->private = ftrace_graph_funcs;
1987 }
1988 } else
1989 file->private_data = ftrace_graph_funcs;
1990 mutex_unlock(&graph_lock);
1991
1992 return ret;
1993}
1994
1995static ssize_t
1996ftrace_graph_read(struct file *file, char __user *ubuf,
1997 size_t cnt, loff_t *ppos)
1998{
1999 if (file->f_mode & FMODE_READ)
2000 return seq_read(file, ubuf, cnt, ppos);
2001 else
2002 return -EPERM;
2003}
2004
/*
 * Resolve the glob in @buffer to function addresses and append them to
 * @array (the set_graph_function list), advancing *idx.  Returns 0 if
 * at least one new function was added, -EINVAL or -ENODEV otherwise.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		/* the array is full: stop scanning */
		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
2055
2056static ssize_t
2057ftrace_graph_write(struct file *file, const char __user *ubuf,
2058 size_t cnt, loff_t *ppos)
2059{
2060 unsigned char buffer[FTRACE_BUFF_MAX+1];
2061 unsigned long *array;
2062 size_t read = 0;
2063 ssize_t ret;
2064 int index = 0;
2065 char ch;
2066
2067 if (!cnt || cnt < 0)
2068 return 0;
2069
2070 mutex_lock(&graph_lock);
2071
2072 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2073 ret = -EBUSY;
2074 goto out;
2075 }
2076
2077 if (file->f_mode & FMODE_READ) {
2078 struct seq_file *m = file->private_data;
2079 array = m->private;
2080 } else
2081 array = file->private_data;
2082
2083 ret = get_user(ch, ubuf++);
2084 if (ret)
2085 goto out;
2086 read++;
2087 cnt--;
2088
2089 /* skip white space */
2090 while (cnt && isspace(ch)) {
2091 ret = get_user(ch, ubuf++);
2092 if (ret)
2093 goto out;
2094 read++;
2095 cnt--;
2096 }
2097
2098 if (isspace(ch)) {
2099 *ppos += read;
2100 ret = read;
2101 goto out;
2102 }
2103
2104 while (cnt && !isspace(ch)) {
2105 if (index < FTRACE_BUFF_MAX)
2106 buffer[index++] = ch;
2107 else {
2108 ret = -EINVAL;
2109 goto out;
2110 }
2111 ret = get_user(ch, ubuf++);
2112 if (ret)
2113 goto out;
2114 read++;
2115 cnt--;
2116 }
2117 buffer[index] = 0;
2118
f9349a8f
FW
2119 /* we allow only one expression at a time */
2120 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
ea4e2bc4
SR
2121 if (ret)
2122 goto out;
2123
ea4e2bc4
SR
2124 file->f_pos += read;
2125
2126 ret = read;
2127 out:
2128 mutex_unlock(&graph_lock);
2129
2130 return ret;
2131}
2132
2133static const struct file_operations ftrace_graph_fops = {
2134 .open = ftrace_graph_open,
2135 .read = ftrace_graph_read,
2136 .write = ftrace_graph_write,
2137};
2138#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2139
df4fc315 2140static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 2141{
5072c59f
SR
2142 struct dentry *entry;
2143
5072c59f
SR
2144 entry = debugfs_create_file("available_filter_functions", 0444,
2145 d_tracer, NULL, &ftrace_avail_fops);
2146 if (!entry)
2147 pr_warning("Could not create debugfs "
2148 "'available_filter_functions' entry\n");
2149
eb9a7bf0
AS
2150 entry = debugfs_create_file("failures", 0444,
2151 d_tracer, NULL, &ftrace_failures_fops);
2152 if (!entry)
2153 pr_warning("Could not create debugfs 'failures' entry\n");
2154
5072c59f
SR
2155 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2156 NULL, &ftrace_filter_fops);
2157 if (!entry)
2158 pr_warning("Could not create debugfs "
2159 "'set_ftrace_filter' entry\n");
41c52c0d
SR
2160
2161 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2162 NULL, &ftrace_notrace_fops);
2163 if (!entry)
2164 pr_warning("Could not create debugfs "
2165 "'set_ftrace_notrace' entry\n");
ad90c0e3 2166
ea4e2bc4
SR
2167#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2168 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
2169 NULL,
2170 &ftrace_graph_fops);
2171 if (!entry)
2172 pr_warning("Could not create debugfs "
2173 "'set_graph_function' entry\n");
2174#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2175
5072c59f
SR
2176 return 0;
2177}
2178
/*
 * Record every mcount call site in [start, end) and convert them to
 * nops.  @mod is the module that owns the range, or NULL for the core
 * kernel.
 */
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}
2210
31e88909
SR
2211void ftrace_init_module(struct module *mod,
2212 unsigned long *start, unsigned long *end)
90d595fe 2213{
00fd61ae 2214 if (ftrace_disabled || start == end)
fed1939c 2215 return;
31e88909 2216 ftrace_convert_nops(mod, start, end);
90d595fe
SR
2217}
2218
/* mcount call-site table collected by the linker */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

/* Boot-time setup of dynamic ftrace; any failure disables it for good. */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
68bf21aa 2254
#else

/* Without dynamic ftrace there is nothing to patch: just turn it on. */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
2272
df4fc315
SR
2273static ssize_t
2274ftrace_pid_read(struct file *file, char __user *ubuf,
2275 size_t cnt, loff_t *ppos)
2276{
2277 char buf[64];
2278 int r;
2279
e32d8956
SR
2280 if (ftrace_pid_trace == ftrace_swapper_pid)
2281 r = sprintf(buf, "swapper tasks\n");
2282 else if (ftrace_pid_trace)
978f3a45 2283 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
df4fc315
SR
2284 else
2285 r = sprintf(buf, "no pid\n");
2286
2287 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2288}
2289
e32d8956 2290static void clear_ftrace_swapper(void)
978f3a45
SR
2291{
2292 struct task_struct *p;
e32d8956 2293 int cpu;
978f3a45 2294
e32d8956
SR
2295 get_online_cpus();
2296 for_each_online_cpu(cpu) {
2297 p = idle_task(cpu);
978f3a45 2298 clear_tsk_trace_trace(p);
e32d8956
SR
2299 }
2300 put_online_cpus();
2301}
978f3a45 2302
e32d8956
SR
2303static void set_ftrace_swapper(void)
2304{
2305 struct task_struct *p;
2306 int cpu;
2307
2308 get_online_cpus();
2309 for_each_online_cpu(cpu) {
2310 p = idle_task(cpu);
2311 set_tsk_trace_trace(p);
2312 }
2313 put_online_cpus();
978f3a45
SR
2314}
2315
e32d8956
SR
2316static void clear_ftrace_pid(struct pid *pid)
2317{
2318 struct task_struct *p;
2319
229c4ef8 2320 rcu_read_lock();
e32d8956
SR
2321 do_each_pid_task(pid, PIDTYPE_PID, p) {
2322 clear_tsk_trace_trace(p);
2323 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
2324 rcu_read_unlock();
2325
e32d8956
SR
2326 put_pid(pid);
2327}
2328
2329static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
2330{
2331 struct task_struct *p;
2332
229c4ef8 2333 rcu_read_lock();
978f3a45
SR
2334 do_each_pid_task(pid, PIDTYPE_PID, p) {
2335 set_tsk_trace_trace(p);
2336 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 2337 rcu_read_unlock();
978f3a45
SR
2338}
2339
e32d8956
SR
2340static void clear_ftrace_pid_task(struct pid **pid)
2341{
2342 if (*pid == ftrace_swapper_pid)
2343 clear_ftrace_swapper();
2344 else
2345 clear_ftrace_pid(*pid);
2346
2347 *pid = NULL;
2348}
2349
2350static void set_ftrace_pid_task(struct pid *pid)
2351{
2352 if (pid == ftrace_swapper_pid)
2353 set_ftrace_swapper();
2354 else
2355 set_ftrace_pid(pid);
2356}
2357
/*
 * Write handler for set_ftrace_pid.
 *
 * "<pid>"       - trace only that pid's tasks
 * "0"           - trace the per-cpu idle (swapper) tasks
 * negative      - disable pid based tracing
 *
 * A reference is taken on the new struct pid; the reference on the
 * previous one is dropped via clear_ftrace_pid_task().
 */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			/* already tracing this pid: drop the extra ref */
			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		/* find_get_pid() found no such pid */
		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
2422
5e2336a0 2423static const struct file_operations ftrace_pid_fops = {
df4fc315
SR
2424 .read = ftrace_pid_read,
2425 .write = ftrace_pid_write,
2426};
2427
2428static __init int ftrace_init_debugfs(void)
2429{
2430 struct dentry *d_tracer;
2431 struct dentry *entry;
2432
2433 d_tracer = tracing_init_dentry();
2434 if (!d_tracer)
2435 return 0;
2436
2437 ftrace_init_dyn_debugfs(d_tracer);
2438
2439 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2440 NULL, &ftrace_pid_fops);
2441 if (!entry)
2442 pr_warning("Could not create debugfs "
2443 "'set_ftrace_pid' entry\n");
2444 return 0;
2445}
df4fc315
SR
2446fs_initcall(ftrace_init_debugfs);
2447
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	/*
	 * No locking here: this is a last-resort path, so mark ftrace
	 * dead and off, then point the trace callback at the stub.
	 */
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
2461
16444a8a 2462/**
3d083395
SR
2463 * register_ftrace_function - register a function for profiling
2464 * @ops - ops structure that holds the function for profiling.
16444a8a 2465 *
3d083395
SR
2466 * Register a function to be called by all functions in the
2467 * kernel.
2468 *
2469 * Note: @ops->func and all the functions it calls must be labeled
2470 * with "notrace", otherwise it will go into a
2471 * recursive loop.
16444a8a 2472 */
3d083395 2473int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 2474{
b0fc494f
SR
2475 int ret;
2476
4eebcc81
SR
2477 if (unlikely(ftrace_disabled))
2478 return -1;
2479
e6ea44e9 2480 mutex_lock(&ftrace_lock);
e7d3737e 2481
b0fc494f 2482 ret = __register_ftrace_function(ops);
5a45cfe1 2483 ftrace_startup(0);
b0fc494f 2484
e6ea44e9 2485 mutex_unlock(&ftrace_lock);
b0fc494f 2486 return ret;
3d083395
SR
2487}
2488
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	/* serialize against register/sysctl changes */
	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
2506
/*
 * Sysctl handler for the ftrace_enabled knob.
 *
 * Only acts on an actual write that changes the value.  Takes
 * ftrace_lock so the ftrace_list snapshot below cannot change
 * underneath us while we pick the trace callback.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* bail on read, on error, or if the value did not change */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/*
			 * Single registered ops: call it directly and skip
			 * the list-walking dispatcher.
			 */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
f17845e5 2549
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* number of register_ftrace_graph() users; graph tracing active if > 0 */
static atomic_t ftrace_graph_active;
/* hibernation notifier; its callback is set in register_ftrace_graph() */
static struct notifier_block ftrace_suspend_notifier;

/* Default entry hook used while no graph tracer is registered. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
2564
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
/*
 * Returns 0 when every task got a stack, -ENOMEM if a preallocation
 * failed, or -EAGAIN when more than FTRACE_RETSTACK_ALLOC_SIZE tasks
 * still needed one -- the caller loops until it stops getting -EAGAIN.
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	/* [start, end) brackets the not-yet-handed-out stacks for 'free:' */
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* preallocate a batch of stacks outside of tasklist_lock */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* free only what this pass actually allocated */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			/* batch exhausted but tasks remain: retry */
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
2610
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	/* scratch array reused across alloc_retstack_tasklist() passes */
	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	/* -EAGAIN means another batch of tasks still needs stacks */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
2635
4a2b8dda
FW
/*
 * Hibernation protection.
 * The state of the current task is too much unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	/* all other PM events are ignored */
	return NOTIFY_DONE;
}
2656
287b6e68
FW
2657int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2658 trace_func_graph_ent_t entryfunc)
15e6cb36 2659{
e7d3737e
FW
2660 int ret = 0;
2661
e6ea44e9 2662 mutex_lock(&ftrace_lock);
e7d3737e 2663
4a2b8dda
FW
2664 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2665 register_pm_notifier(&ftrace_suspend_notifier);
2666
287b6e68 2667 atomic_inc(&ftrace_graph_active);
fb52607a 2668 ret = start_graph_tracing();
f201ae23 2669 if (ret) {
287b6e68 2670 atomic_dec(&ftrace_graph_active);
f201ae23
FW
2671 goto out;
2672 }
e53a6319 2673
287b6e68
FW
2674 ftrace_graph_return = retfunc;
2675 ftrace_graph_entry = entryfunc;
e53a6319 2676
5a45cfe1 2677 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
2678
2679out:
e6ea44e9 2680 mutex_unlock(&ftrace_lock);
e7d3737e 2681 return ret;
15e6cb36
FW
2682}
2683
/*
 * Tear down function-graph tracing: restore the stub hooks, stop
 * FUNC_RET handling, and drop the hibernation notifier.
 */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	/* reset the hooks before shutting down so no new entries fire */
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}
f201ae23
FW
2696
2697/* Allocate a return stack for newly created task */
fb52607a 2698void ftrace_graph_init_task(struct task_struct *t)
f201ae23 2699{
287b6e68 2700 if (atomic_read(&ftrace_graph_active)) {
f201ae23
FW
2701 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2702 * sizeof(struct ftrace_ret_stack),
2703 GFP_KERNEL);
2704 if (!t->ret_stack)
2705 return;
2706 t->curr_ret_stack = -1;
380c4b14 2707 atomic_set(&t->tracing_graph_pause, 0);
f201ae23
FW
2708 atomic_set(&t->trace_overrun, 0);
2709 } else
2710 t->ret_stack = NULL;
2711}
2712
/* Free the return stack of an exiting task. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
14a866c5
SR
2723
/* Emergency stop of all function tracing (wrapper around ftrace_stop()). */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
15e6cb36
FW
2728#endif
2729