]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/trace/ftrace.c
ftrace: convert ftrace_lock from a spinlock to mutex
[mirror_ubuntu-bionic-kernel.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
f22f9a89 25#include <linux/kprobes.h>
2d8b820b 26#include <linux/ftrace.h>
b0fc494f 27#include <linux/sysctl.h>
5072c59f 28#include <linux/ctype.h>
3d083395
SR
29#include <linux/list.h>
30
395a59d0
AS
31#include <asm/ftrace.h>
32
3d083395 33#include "trace.h"
16444a8a 34
/*
 * Internal sanity checks: if one of these conditions ever fires,
 * something is badly wrong inside ftrace itself, so we shut the
 * whole tracer down (ftrace_kill) rather than risk corrupting
 * the running kernel any further.
 */
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
46
4eebcc81
SR
47/* ftrace_enabled is a method to turn ftrace on or off */
48int ftrace_enabled __read_mostly;
d61f82d0 49static int last_ftrace_enabled;
b0fc494f 50
0ef8cde5 51/* set when tracing only a pid */
978f3a45 52struct pid *ftrace_pid_trace;
21bbecda 53static struct pid * const ftrace_swapper_pid = &init_struct_pid;
df4fc315 54
60a7ecf4
SR
55/* Quick disabling of function tracer. */
56int function_trace_stop;
57
4eebcc81
SR
58/*
59 * ftrace_disabled is set when an anomaly is discovered.
60 * ftrace_disabled is much stronger than ftrace_enabled.
61 */
62static int ftrace_disabled __read_mostly;
63
52baf119 64static DEFINE_MUTEX(ftrace_lock);
b0fc494f 65static DEFINE_MUTEX(ftrace_sysctl_lock);
df4fc315 66static DEFINE_MUTEX(ftrace_start_lock);
b0fc494f 67
16444a8a
ACM
68static struct ftrace_ops ftrace_list_end __read_mostly =
69{
70 .func = ftrace_stub,
71};
72
73static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
74ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 75ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 76ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 77
f2252935 78static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
79{
80 struct ftrace_ops *op = ftrace_list;
81
82 /* in case someone actually ports this to alpha! */
83 read_barrier_depends();
84
85 while (op != &ftrace_list_end) {
86 /* silly alpha */
87 read_barrier_depends();
88 op->func(ip, parent_ip);
89 op = op->next;
90 };
91}
92
df4fc315
SR
93static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
94{
0ef8cde5 95 if (!test_tsk_trace_trace(current))
df4fc315
SR
96 return;
97
98 ftrace_pid_function(ip, parent_ip);
99}
100
101static void set_ftrace_pid_function(ftrace_func_t func)
102{
103 /* do not set ftrace_pid_function to itself! */
104 if (func != ftrace_pid_func)
105 ftrace_pid_function = func;
106}
107
16444a8a 108/**
3d083395 109 * clear_ftrace_function - reset the ftrace function
16444a8a 110 *
3d083395
SR
111 * This NULLs the ftrace function and in essence stops
112 * tracing. There may be lag
16444a8a 113 */
3d083395 114void clear_ftrace_function(void)
16444a8a 115{
3d083395 116 ftrace_trace_function = ftrace_stub;
60a7ecf4 117 __ftrace_trace_function = ftrace_stub;
df4fc315 118 ftrace_pid_function = ftrace_stub;
3d083395
SR
119}
120
60a7ecf4
SR
121#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
122/*
123 * For those archs that do not test ftrace_trace_stop in their
124 * mcount call site, we need to do it from C.
125 */
126static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
127{
128 if (function_trace_stop)
129 return;
130
131 __ftrace_trace_function(ip, parent_ip);
132}
133#endif
134
e309b41d 135static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 136{
52baf119 137 mutex_lock(&ftrace_lock);
16444a8a 138
16444a8a
ACM
139 ops->next = ftrace_list;
140 /*
141 * We are entering ops into the ftrace_list but another
142 * CPU might be walking that list. We need to make sure
143 * the ops->next pointer is valid before another CPU sees
144 * the ops pointer included into the ftrace_list.
145 */
146 smp_wmb();
147 ftrace_list = ops;
3d083395 148
b0fc494f 149 if (ftrace_enabled) {
df4fc315
SR
150 ftrace_func_t func;
151
152 if (ops->next == &ftrace_list_end)
153 func = ops->func;
154 else
155 func = ftrace_list_func;
156
978f3a45 157 if (ftrace_pid_trace) {
df4fc315
SR
158 set_ftrace_pid_function(func);
159 func = ftrace_pid_func;
160 }
161
b0fc494f
SR
162 /*
163 * For one func, simply call it directly.
164 * For more than one func, call the chain.
165 */
60a7ecf4 166#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 167 ftrace_trace_function = func;
60a7ecf4 168#else
df4fc315 169 __ftrace_trace_function = func;
60a7ecf4
SR
170 ftrace_trace_function = ftrace_test_stop_func;
171#endif
b0fc494f 172 }
3d083395 173
52baf119 174 mutex_unlock(&ftrace_lock);
16444a8a
ACM
175
176 return 0;
177}
178
e309b41d 179static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 180{
16444a8a
ACM
181 struct ftrace_ops **p;
182 int ret = 0;
183
52baf119 184 mutex_lock(&ftrace_lock);
16444a8a
ACM
185
186 /*
3d083395
SR
187 * If we are removing the last function, then simply point
188 * to the ftrace_stub.
16444a8a
ACM
189 */
190 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
191 ftrace_trace_function = ftrace_stub;
192 ftrace_list = &ftrace_list_end;
193 goto out;
194 }
195
196 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
197 if (*p == ops)
198 break;
199
200 if (*p != ops) {
201 ret = -1;
202 goto out;
203 }
204
205 *p = (*p)->next;
206
b0fc494f
SR
207 if (ftrace_enabled) {
208 /* If we only have one func left, then call that directly */
df4fc315
SR
209 if (ftrace_list->next == &ftrace_list_end) {
210 ftrace_func_t func = ftrace_list->func;
211
978f3a45 212 if (ftrace_pid_trace) {
df4fc315
SR
213 set_ftrace_pid_function(func);
214 func = ftrace_pid_func;
215 }
216#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
217 ftrace_trace_function = func;
218#else
219 __ftrace_trace_function = func;
220#endif
221 }
b0fc494f 222 }
16444a8a
ACM
223
224 out:
52baf119 225 mutex_unlock(&ftrace_lock);
3d083395
SR
226
227 return ret;
228}
229
df4fc315
SR
230static void ftrace_update_pid_func(void)
231{
232 ftrace_func_t func;
233
52baf119 234 mutex_lock(&ftrace_lock);
df4fc315
SR
235
236 if (ftrace_trace_function == ftrace_stub)
237 goto out;
238
239 func = ftrace_trace_function;
240
978f3a45 241 if (ftrace_pid_trace) {
df4fc315
SR
242 set_ftrace_pid_function(func);
243 func = ftrace_pid_func;
244 } else {
66eafebc
LW
245 if (func == ftrace_pid_func)
246 func = ftrace_pid_function;
df4fc315
SR
247 }
248
249#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
250 ftrace_trace_function = func;
251#else
252 __ftrace_trace_function = func;
253#endif
254
255 out:
52baf119 256 mutex_unlock(&ftrace_lock);
df4fc315
SR
257}
258
3d083395 259#ifdef CONFIG_DYNAMIC_FTRACE
99ecdc43 260#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 261# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
262#endif
263
d61f82d0
SR
264enum {
265 FTRACE_ENABLE_CALLS = (1 << 0),
266 FTRACE_DISABLE_CALLS = (1 << 1),
267 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
268 FTRACE_ENABLE_MCOUNT = (1 << 3),
269 FTRACE_DISABLE_MCOUNT = (1 << 4),
5a45cfe1
SR
270 FTRACE_START_FUNC_RET = (1 << 5),
271 FTRACE_STOP_FUNC_RET = (1 << 6),
d61f82d0
SR
272};
273
5072c59f
SR
274static int ftrace_filtered;
275
08f5ac90 276static LIST_HEAD(ftrace_new_addrs);
3d083395 277
41c52c0d 278static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 279
3c1720f0
SR
280struct ftrace_page {
281 struct ftrace_page *next;
431aa3fb 282 int index;
3c1720f0 283 struct dyn_ftrace records[];
aa5e5cea 284};
3c1720f0
SR
285
286#define ENTRIES_PER_PAGE \
287 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
288
289/* estimate from running different kernels */
290#define NR_TO_INIT 10000
291
292static struct ftrace_page *ftrace_pages_start;
293static struct ftrace_page *ftrace_pages;
294
37ad5084
SR
295static struct dyn_ftrace *ftrace_free_records;
296
265c831c
SR
297/*
298 * This is a double for. Do not use 'break' to break out of the loop,
299 * you must use a goto.
300 */
301#define do_for_each_ftrace_rec(pg, rec) \
302 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
303 int _____i; \
304 for (_____i = 0; _____i < pg->index; _____i++) { \
305 rec = &pg->records[_____i];
306
307#define while_for_each_ftrace_rec() \
308 } \
309 }
#ifdef CONFIG_KPROBES

static int frozen_record_count;

/* Mark a record untouchable while a kprobe owns its mcount site. */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
/* Without kprobes no record is ever frozen. */
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
340
e309b41d 341static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 342{
37ad5084
SR
343 rec->ip = (unsigned long)ftrace_free_records;
344 ftrace_free_records = rec;
345 rec->flags |= FTRACE_FL_FREE;
346}
347
fed1939c
SR
348void ftrace_release(void *start, unsigned long size)
349{
350 struct dyn_ftrace *rec;
351 struct ftrace_page *pg;
352 unsigned long s = (unsigned long)start;
353 unsigned long e = s + size;
fed1939c 354
00fd61ae 355 if (ftrace_disabled || !start)
fed1939c
SR
356 return;
357
52baf119 358 mutex_lock(&ftrace_lock);
265c831c
SR
359 do_for_each_ftrace_rec(pg, rec) {
360 if ((rec->ip >= s) && (rec->ip < e))
361 ftrace_free_rec(rec);
362 } while_for_each_ftrace_rec();
52baf119 363 mutex_unlock(&ftrace_lock);
fed1939c
SR
364}
365
e309b41d 366static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 367{
37ad5084
SR
368 struct dyn_ftrace *rec;
369
370 /* First check for freed records */
371 if (ftrace_free_records) {
372 rec = ftrace_free_records;
373
37ad5084 374 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 375 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
376 ftrace_free_records = NULL;
377 return NULL;
378 }
379
380 ftrace_free_records = (void *)rec->ip;
381 memset(rec, 0, sizeof(*rec));
382 return rec;
383 }
384
3c1720f0 385 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
386 if (!ftrace_pages->next) {
387 /* allocate another page */
388 ftrace_pages->next =
389 (void *)get_zeroed_page(GFP_KERNEL);
390 if (!ftrace_pages->next)
391 return NULL;
392 }
3c1720f0
SR
393 ftrace_pages = ftrace_pages->next;
394 }
395
396 return &ftrace_pages->records[ftrace_pages->index++];
397}
398
08f5ac90 399static struct dyn_ftrace *
d61f82d0 400ftrace_record_ip(unsigned long ip)
3d083395 401{
08f5ac90 402 struct dyn_ftrace *rec;
3d083395 403
f3c7ac40 404 if (ftrace_disabled)
08f5ac90 405 return NULL;
3d083395 406
08f5ac90
SR
407 rec = ftrace_alloc_dyn_node(ip);
408 if (!rec)
409 return NULL;
3d083395 410
08f5ac90 411 rec->ip = ip;
3d083395 412
08f5ac90 413 list_add(&rec->list, &ftrace_new_addrs);
3d083395 414
08f5ac90 415 return rec;
3d083395
SR
416}
417
b17e8a37
SR
418static void print_ip_ins(const char *fmt, unsigned char *p)
419{
420 int i;
421
422 printk(KERN_CONT "%s", fmt);
423
424 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
425 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
426}
427
31e88909 428static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
429{
430 switch (failed) {
431 case -EFAULT:
432 FTRACE_WARN_ON_ONCE(1);
433 pr_info("ftrace faulted on modifying ");
434 print_ip_sym(ip);
435 break;
436 case -EINVAL:
437 FTRACE_WARN_ON_ONCE(1);
438 pr_info("ftrace failed to modify ");
439 print_ip_sym(ip);
b17e8a37 440 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
441 printk(KERN_CONT "\n");
442 break;
443 case -EPERM:
444 FTRACE_WARN_ON_ONCE(1);
445 pr_info("ftrace faulted on writing ");
446 print_ip_sym(ip);
447 break;
448 default:
449 FTRACE_WARN_ON_ONCE(1);
450 pr_info("ftrace faulted on unknown error ");
451 print_ip_sym(ip);
452 }
453}
454
3c1720f0 455
0eb96701 456static int
31e88909 457__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 458{
41c52c0d 459 unsigned long ip, fl;
e7d3737e
FW
460 unsigned long ftrace_addr;
461
f0001207 462 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f
SR
463
464 ip = rec->ip;
465
982c350b
SR
466 /*
467 * If this record is not to be traced and
468 * it is not enabled then do nothing.
469 *
470 * If this record is not to be traced and
57794a9d 471 * it is enabled then disable it.
982c350b
SR
472 *
473 */
474 if (rec->flags & FTRACE_FL_NOTRACE) {
475 if (rec->flags & FTRACE_FL_ENABLED)
476 rec->flags &= ~FTRACE_FL_ENABLED;
477 else
478 return 0;
479
480 } else if (ftrace_filtered && enable) {
5072c59f 481 /*
982c350b 482 * Filtering is on:
5072c59f 483 */
a4500b84 484
982c350b 485 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
5072c59f 486
982c350b
SR
487 /* Record is filtered and enabled, do nothing */
488 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
0eb96701 489 return 0;
5072c59f 490
57794a9d 491 /* Record is not filtered or enabled, do nothing */
982c350b
SR
492 if (!fl)
493 return 0;
494
495 /* Record is not filtered but enabled, disable it */
496 if (fl == FTRACE_FL_ENABLED)
5072c59f 497 rec->flags &= ~FTRACE_FL_ENABLED;
982c350b
SR
498 else
499 /* Otherwise record is filtered but not enabled, enable it */
5072c59f 500 rec->flags |= FTRACE_FL_ENABLED;
5072c59f 501 } else {
982c350b 502 /* Disable or not filtered */
5072c59f 503
41c52c0d 504 if (enable) {
982c350b 505 /* if record is enabled, do nothing */
5072c59f 506 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 507 return 0;
982c350b 508
5072c59f 509 rec->flags |= FTRACE_FL_ENABLED;
982c350b 510
5072c59f 511 } else {
982c350b 512
57794a9d 513 /* if record is not enabled, do nothing */
5072c59f 514 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 515 return 0;
982c350b 516
5072c59f
SR
517 rec->flags &= ~FTRACE_FL_ENABLED;
518 }
519 }
520
982c350b 521 if (rec->flags & FTRACE_FL_ENABLED)
e7d3737e 522 return ftrace_make_call(rec, ftrace_addr);
31e88909 523 else
e7d3737e 524 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
525}
526
e309b41d 527static void ftrace_replace_code(int enable)
3c1720f0 528{
265c831c 529 int failed;
3c1720f0
SR
530 struct dyn_ftrace *rec;
531 struct ftrace_page *pg;
3c1720f0 532
265c831c
SR
533 do_for_each_ftrace_rec(pg, rec) {
534 /*
535 * Skip over free records and records that have
536 * failed.
537 */
538 if (rec->flags & FTRACE_FL_FREE ||
539 rec->flags & FTRACE_FL_FAILED)
540 continue;
541
542 /* ignore updates to this record's mcount site */
543 if (get_kprobe((void *)rec->ip)) {
544 freeze_record(rec);
545 continue;
546 } else {
547 unfreeze_record(rec);
548 }
f22f9a89 549
265c831c
SR
550 failed = __ftrace_replace_code(rec, enable);
551 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
552 rec->flags |= FTRACE_FL_FAILED;
553 if ((system_state == SYSTEM_BOOTING) ||
554 !core_kernel_text(rec->ip)) {
555 ftrace_free_rec(rec);
556 } else
557 ftrace_bug(failed, rec->ip);
3c1720f0 558 }
265c831c 559 } while_for_each_ftrace_rec();
3c1720f0
SR
560}
561
492a7ea5 562static int
31e88909 563ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
564{
565 unsigned long ip;
593eb8a2 566 int ret;
3c1720f0
SR
567
568 ip = rec->ip;
569
25aac9dc 570 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 571 if (ret) {
31e88909 572 ftrace_bug(ret, ip);
3c1720f0 573 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 574 return 0;
37ad5084 575 }
492a7ea5 576 return 1;
3c1720f0
SR
577}
578
e309b41d 579static int __ftrace_modify_code(void *data)
3d083395 580{
d61f82d0
SR
581 int *command = data;
582
a3583244 583 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 584 ftrace_replace_code(1);
a3583244 585 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
586 ftrace_replace_code(0);
587
588 if (*command & FTRACE_UPDATE_TRACE_FUNC)
589 ftrace_update_ftrace_func(ftrace_trace_function);
590
5a45cfe1
SR
591 if (*command & FTRACE_START_FUNC_RET)
592 ftrace_enable_ftrace_graph_caller();
593 else if (*command & FTRACE_STOP_FUNC_RET)
594 ftrace_disable_ftrace_graph_caller();
595
d61f82d0 596 return 0;
3d083395
SR
597}
598
e309b41d 599static void ftrace_run_update_code(int command)
3d083395 600{
784e2d76 601 stop_machine(__ftrace_modify_code, &command, NULL);
3d083395
SR
602}
603
d61f82d0 604static ftrace_func_t saved_ftrace_func;
60a7ecf4 605static int ftrace_start_up;
df4fc315
SR
606
607static void ftrace_startup_enable(int command)
608{
609 if (saved_ftrace_func != ftrace_trace_function) {
610 saved_ftrace_func = ftrace_trace_function;
611 command |= FTRACE_UPDATE_TRACE_FUNC;
612 }
613
614 if (!command || !ftrace_enabled)
615 return;
616
617 ftrace_run_update_code(command);
618}
d61f82d0 619
5a45cfe1 620static void ftrace_startup(int command)
3d083395 621{
4eebcc81
SR
622 if (unlikely(ftrace_disabled))
623 return;
624
cb7be3b2 625 mutex_lock(&ftrace_start_lock);
60a7ecf4 626 ftrace_start_up++;
982c350b 627 command |= FTRACE_ENABLE_CALLS;
d61f82d0 628
df4fc315 629 ftrace_startup_enable(command);
3d083395 630
cb7be3b2 631 mutex_unlock(&ftrace_start_lock);
3d083395
SR
632}
633
5a45cfe1 634static void ftrace_shutdown(int command)
3d083395 635{
4eebcc81
SR
636 if (unlikely(ftrace_disabled))
637 return;
638
cb7be3b2 639 mutex_lock(&ftrace_start_lock);
60a7ecf4
SR
640 ftrace_start_up--;
641 if (!ftrace_start_up)
d61f82d0 642 command |= FTRACE_DISABLE_CALLS;
3d083395 643
d61f82d0
SR
644 if (saved_ftrace_func != ftrace_trace_function) {
645 saved_ftrace_func = ftrace_trace_function;
646 command |= FTRACE_UPDATE_TRACE_FUNC;
647 }
3d083395 648
d61f82d0
SR
649 if (!command || !ftrace_enabled)
650 goto out;
651
652 ftrace_run_update_code(command);
3d083395 653 out:
cb7be3b2 654 mutex_unlock(&ftrace_start_lock);
3d083395
SR
655}
656
e309b41d 657static void ftrace_startup_sysctl(void)
b0fc494f 658{
d61f82d0
SR
659 int command = FTRACE_ENABLE_MCOUNT;
660
4eebcc81
SR
661 if (unlikely(ftrace_disabled))
662 return;
663
cb7be3b2 664 mutex_lock(&ftrace_start_lock);
d61f82d0
SR
665 /* Force update next time */
666 saved_ftrace_func = NULL;
60a7ecf4
SR
667 /* ftrace_start_up is true if we want ftrace running */
668 if (ftrace_start_up)
d61f82d0
SR
669 command |= FTRACE_ENABLE_CALLS;
670
671 ftrace_run_update_code(command);
cb7be3b2 672 mutex_unlock(&ftrace_start_lock);
b0fc494f
SR
673}
674
e309b41d 675static void ftrace_shutdown_sysctl(void)
b0fc494f 676{
d61f82d0
SR
677 int command = FTRACE_DISABLE_MCOUNT;
678
4eebcc81
SR
679 if (unlikely(ftrace_disabled))
680 return;
681
cb7be3b2 682 mutex_lock(&ftrace_start_lock);
60a7ecf4
SR
683 /* ftrace_start_up is true if ftrace is running */
684 if (ftrace_start_up)
d61f82d0
SR
685 command |= FTRACE_DISABLE_CALLS;
686
687 ftrace_run_update_code(command);
cb7be3b2 688 mutex_unlock(&ftrace_start_lock);
b0fc494f
SR
689}
690
3d083395
SR
691static cycle_t ftrace_update_time;
692static unsigned long ftrace_update_cnt;
693unsigned long ftrace_update_tot_cnt;
694
31e88909 695static int ftrace_update_code(struct module *mod)
3d083395 696{
08f5ac90 697 struct dyn_ftrace *p, *t;
f22f9a89 698 cycle_t start, stop;
3d083395 699
750ed1a4 700 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
701 ftrace_update_cnt = 0;
702
08f5ac90 703 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
3d083395 704
08f5ac90
SR
705 /* If something went wrong, bail without enabling anything */
706 if (unlikely(ftrace_disabled))
707 return -1;
f22f9a89 708
08f5ac90 709 list_del_init(&p->list);
f22f9a89 710
08f5ac90 711 /* convert record (i.e, patch mcount-call with NOP) */
31e88909 712 if (ftrace_code_disable(mod, p)) {
08f5ac90
SR
713 p->flags |= FTRACE_FL_CONVERTED;
714 ftrace_update_cnt++;
715 } else
716 ftrace_free_rec(p);
3d083395
SR
717 }
718
750ed1a4 719 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
720 ftrace_update_time = stop - start;
721 ftrace_update_tot_cnt += ftrace_update_cnt;
722
16444a8a
ACM
723 return 0;
724}
725
68bf21aa 726static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
727{
728 struct ftrace_page *pg;
729 int cnt;
730 int i;
3c1720f0
SR
731
732 /* allocate a few pages */
733 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
734 if (!ftrace_pages_start)
735 return -1;
736
737 /*
738 * Allocate a few more pages.
739 *
740 * TODO: have some parser search vmlinux before
741 * final linking to find all calls to ftrace.
742 * Then we can:
743 * a) know how many pages to allocate.
744 * and/or
745 * b) set up the table then.
746 *
747 * The dynamic code is still necessary for
748 * modules.
749 */
750
751 pg = ftrace_pages = ftrace_pages_start;
752
68bf21aa 753 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 754 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 755 num_to_init, cnt + 1);
3c1720f0
SR
756
757 for (i = 0; i < cnt; i++) {
758 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
759
760 /* If we fail, we'll try later anyway */
761 if (!pg->next)
762 break;
763
764 pg = pg->next;
765 }
766
767 return 0;
768}
769
5072c59f
SR
770enum {
771 FTRACE_ITER_FILTER = (1 << 0),
772 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 773 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 774 FTRACE_ITER_FAILURES = (1 << 3),
0c75a3ed 775 FTRACE_ITER_PRINTALL = (1 << 4),
5072c59f
SR
776};
777
778#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
779
780struct ftrace_iterator {
5072c59f 781 struct ftrace_page *pg;
431aa3fb 782 int idx;
5072c59f
SR
783 unsigned flags;
784 unsigned char buffer[FTRACE_BUFF_MAX+1];
785 unsigned buffer_idx;
786 unsigned filtered;
787};
788
e309b41d 789static void *
5072c59f
SR
790t_next(struct seq_file *m, void *v, loff_t *pos)
791{
792 struct ftrace_iterator *iter = m->private;
793 struct dyn_ftrace *rec = NULL;
794
795 (*pos)++;
796
0c75a3ed
SR
797 if (iter->flags & FTRACE_ITER_PRINTALL)
798 return NULL;
799
52baf119 800 mutex_lock(&ftrace_lock);
5072c59f
SR
801 retry:
802 if (iter->idx >= iter->pg->index) {
803 if (iter->pg->next) {
804 iter->pg = iter->pg->next;
805 iter->idx = 0;
806 goto retry;
50cdaf08
LW
807 } else {
808 iter->idx = -1;
5072c59f
SR
809 }
810 } else {
811 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
812 if ((rec->flags & FTRACE_FL_FREE) ||
813
814 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
815 (rec->flags & FTRACE_FL_FAILED)) ||
816
817 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 818 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 819
0183fb1c
SR
820 ((iter->flags & FTRACE_ITER_FILTER) &&
821 !(rec->flags & FTRACE_FL_FILTER)) ||
822
41c52c0d
SR
823 ((iter->flags & FTRACE_ITER_NOTRACE) &&
824 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
825 rec = NULL;
826 goto retry;
827 }
828 }
52baf119 829 mutex_unlock(&ftrace_lock);
5072c59f 830
5072c59f
SR
831 return rec;
832}
833
834static void *t_start(struct seq_file *m, loff_t *pos)
835{
836 struct ftrace_iterator *iter = m->private;
837 void *p = NULL;
5072c59f 838
0c75a3ed
SR
839 /*
840 * For set_ftrace_filter reading, if we have the filter
841 * off, we can short cut and just print out that all
842 * functions are enabled.
843 */
844 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
845 if (*pos > 0)
846 return NULL;
847 iter->flags |= FTRACE_ITER_PRINTALL;
848 (*pos)++;
849 return iter;
850 }
851
50cdaf08
LW
852 if (*pos > 0) {
853 if (iter->idx < 0)
854 return p;
855 (*pos)--;
856 iter->idx--;
857 }
5821e1b7 858
50cdaf08 859 p = t_next(m, p, pos);
5072c59f
SR
860
861 return p;
862}
863
/* seq_file ->stop: nothing to release — locking is per t_next() call. */
static void t_stop(struct seq_file *m, void *p)
{
}
867
868static int t_show(struct seq_file *m, void *v)
869{
0c75a3ed 870 struct ftrace_iterator *iter = m->private;
5072c59f
SR
871 struct dyn_ftrace *rec = v;
872 char str[KSYM_SYMBOL_LEN];
873
0c75a3ed
SR
874 if (iter->flags & FTRACE_ITER_PRINTALL) {
875 seq_printf(m, "#### all functions enabled ####\n");
876 return 0;
877 }
878
5072c59f
SR
879 if (!rec)
880 return 0;
881
882 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
883
50cdaf08 884 seq_printf(m, "%s\n", str);
5072c59f
SR
885
886 return 0;
887}
888
889static struct seq_operations show_ftrace_seq_ops = {
890 .start = t_start,
891 .next = t_next,
892 .stop = t_stop,
893 .show = t_show,
894};
895
e309b41d 896static int
5072c59f
SR
897ftrace_avail_open(struct inode *inode, struct file *file)
898{
899 struct ftrace_iterator *iter;
900 int ret;
901
4eebcc81
SR
902 if (unlikely(ftrace_disabled))
903 return -ENODEV;
904
5072c59f
SR
905 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
906 if (!iter)
907 return -ENOMEM;
908
909 iter->pg = ftrace_pages_start;
5072c59f
SR
910
911 ret = seq_open(file, &show_ftrace_seq_ops);
912 if (!ret) {
913 struct seq_file *m = file->private_data;
4bf39a94 914
5072c59f 915 m->private = iter;
4bf39a94 916 } else {
5072c59f 917 kfree(iter);
4bf39a94 918 }
5072c59f
SR
919
920 return ret;
921}
922
923int ftrace_avail_release(struct inode *inode, struct file *file)
924{
925 struct seq_file *m = (struct seq_file *)file->private_data;
926 struct ftrace_iterator *iter = m->private;
927
928 seq_release(inode, file);
929 kfree(iter);
4bf39a94 930
5072c59f
SR
931 return 0;
932}
933
eb9a7bf0
AS
934static int
935ftrace_failures_open(struct inode *inode, struct file *file)
936{
937 int ret;
938 struct seq_file *m;
939 struct ftrace_iterator *iter;
940
941 ret = ftrace_avail_open(inode, file);
942 if (!ret) {
943 m = (struct seq_file *)file->private_data;
944 iter = (struct ftrace_iterator *)m->private;
945 iter->flags = FTRACE_ITER_FAILURES;
946 }
947
948 return ret;
949}
950
951
41c52c0d 952static void ftrace_filter_reset(int enable)
5072c59f
SR
953{
954 struct ftrace_page *pg;
955 struct dyn_ftrace *rec;
41c52c0d 956 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f 957
52baf119 958 mutex_lock(&ftrace_lock);
41c52c0d
SR
959 if (enable)
960 ftrace_filtered = 0;
265c831c
SR
961 do_for_each_ftrace_rec(pg, rec) {
962 if (rec->flags & FTRACE_FL_FAILED)
963 continue;
964 rec->flags &= ~type;
965 } while_for_each_ftrace_rec();
52baf119 966 mutex_unlock(&ftrace_lock);
5072c59f
SR
967}
968
e309b41d 969static int
41c52c0d 970ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
971{
972 struct ftrace_iterator *iter;
973 int ret = 0;
974
4eebcc81
SR
975 if (unlikely(ftrace_disabled))
976 return -ENODEV;
977
5072c59f
SR
978 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
979 if (!iter)
980 return -ENOMEM;
981
41c52c0d 982 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
983 if ((file->f_mode & FMODE_WRITE) &&
984 !(file->f_flags & O_APPEND))
41c52c0d 985 ftrace_filter_reset(enable);
5072c59f
SR
986
987 if (file->f_mode & FMODE_READ) {
988 iter->pg = ftrace_pages_start;
41c52c0d
SR
989 iter->flags = enable ? FTRACE_ITER_FILTER :
990 FTRACE_ITER_NOTRACE;
5072c59f
SR
991
992 ret = seq_open(file, &show_ftrace_seq_ops);
993 if (!ret) {
994 struct seq_file *m = file->private_data;
995 m->private = iter;
996 } else
997 kfree(iter);
998 } else
999 file->private_data = iter;
41c52c0d 1000 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1001
1002 return ret;
1003}
1004
/* Thin wrappers selecting filter (1) vs notrace (0) semantics. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1016
e309b41d 1017static ssize_t
41c52c0d 1018ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
1019 size_t cnt, loff_t *ppos)
1020{
1021 if (file->f_mode & FMODE_READ)
1022 return seq_read(file, ubuf, cnt, ppos);
1023 else
1024 return -EPERM;
1025}
1026
e309b41d 1027static loff_t
41c52c0d 1028ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1029{
1030 loff_t ret;
1031
1032 if (file->f_mode & FMODE_READ)
1033 ret = seq_lseek(file, offset, origin);
1034 else
1035 file->f_pos = ret = 1;
1036
1037 return ret;
1038}
1039
/* Glob match positions produced by ftrace_setup_glob(). */
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
1046
9f4801e3
SR
1047/*
1048 * (static function - no need for kernel doc)
1049 *
1050 * Pass in a buffer containing a glob and this function will
1051 * set search to point to the search part of the buffer and
1052 * return the type of search it is (see enum above).
1053 * This does modify buff.
1054 *
1055 * Returns enum type.
1056 * search returns the pointer to use for comparison.
1057 * not returns 1 if buff started with a '!'
1058 * 0 otherwise.
1059 */
1060static int
64e7c440 1061ftrace_setup_glob(char *buff, int len, char **search, int *not)
5072c59f 1062{
5072c59f 1063 int type = MATCH_FULL;
9f4801e3 1064 int i;
ea3a6d6d
SR
1065
1066 if (buff[0] == '!') {
9f4801e3 1067 *not = 1;
ea3a6d6d
SR
1068 buff++;
1069 len--;
9f4801e3
SR
1070 } else
1071 *not = 0;
1072
1073 *search = buff;
5072c59f
SR
1074
1075 for (i = 0; i < len; i++) {
1076 if (buff[i] == '*') {
1077 if (!i) {
9f4801e3 1078 *search = buff + 1;
5072c59f 1079 type = MATCH_END_ONLY;
5072c59f 1080 } else {
9f4801e3 1081 if (type == MATCH_END_ONLY)
5072c59f 1082 type = MATCH_MIDDLE_ONLY;
9f4801e3 1083 else
5072c59f 1084 type = MATCH_FRONT_ONLY;
5072c59f
SR
1085 buff[i] = 0;
1086 break;
1087 }
1088 }
1089 }
1090
9f4801e3
SR
1091 return type;
1092}
1093
64e7c440 1094static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 1095{
9f4801e3
SR
1096 int matched = 0;
1097 char *ptr;
1098
9f4801e3
SR
1099 switch (type) {
1100 case MATCH_FULL:
1101 if (strcmp(str, regex) == 0)
1102 matched = 1;
1103 break;
1104 case MATCH_FRONT_ONLY:
1105 if (strncmp(str, regex, len) == 0)
1106 matched = 1;
1107 break;
1108 case MATCH_MIDDLE_ONLY:
1109 if (strstr(str, regex))
1110 matched = 1;
1111 break;
1112 case MATCH_END_ONLY:
1113 ptr = strstr(str, regex);
1114 if (ptr && (ptr[len] == 0))
1115 matched = 1;
1116 break;
1117 }
1118
1119 return matched;
1120}
1121
64e7c440
SR
1122static int
1123ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1124{
1125 char str[KSYM_SYMBOL_LEN];
1126
1127 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1128 return ftrace_match(str, regex, len, type);
1129}
1130
9f4801e3
SR
1131static void ftrace_match_records(char *buff, int len, int enable)
1132{
1133 char *search;
1134 struct ftrace_page *pg;
1135 struct dyn_ftrace *rec;
1136 int type;
1137 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1138 unsigned search_len;
1139 int not;
1140
1141 type = ftrace_setup_glob(buff, len, &search, &not);
1142
1143 search_len = strlen(search);
1144
52baf119 1145 mutex_lock(&ftrace_lock);
265c831c 1146 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1147
1148 if (rec->flags & FTRACE_FL_FAILED)
1149 continue;
9f4801e3
SR
1150
1151 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1152 if (not)
1153 rec->flags &= ~flag;
1154 else
1155 rec->flags |= flag;
1156 }
e68746a2
SR
1157 /*
1158 * Only enable filtering if we have a function that
1159 * is filtered on.
1160 */
1161 if (enable && (rec->flags & FTRACE_FL_FILTER))
1162 ftrace_filtered = 1;
265c831c 1163 } while_for_each_ftrace_rec();
52baf119 1164 mutex_unlock(&ftrace_lock);
5072c59f
SR
1165}
1166
64e7c440
SR
1167static int
1168ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1169 char *regex, int len, int type)
1170{
1171 char str[KSYM_SYMBOL_LEN];
1172 char *modname;
1173
1174 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1175
1176 if (!modname || strcmp(modname, mod))
1177 return 0;
1178
1179 /* blank search means to match all funcs in the mod */
1180 if (len)
1181 return ftrace_match(str, regex, len, type);
1182 else
1183 return 1;
1184}
1185
1186static void ftrace_match_module_records(char *buff, char *mod, int enable)
1187{
1188 char *search = buff;
1189 struct ftrace_page *pg;
1190 struct dyn_ftrace *rec;
1191 int type = MATCH_FULL;
1192 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1193 unsigned search_len = 0;
1194 int not = 0;
1195
1196 /* blank or '*' mean the same */
1197 if (strcmp(buff, "*") == 0)
1198 buff[0] = 0;
1199
1200 /* handle the case of 'dont filter this module' */
1201 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1202 buff[0] = 0;
1203 not = 1;
1204 }
1205
1206 if (strlen(buff)) {
1207 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1208 search_len = strlen(search);
1209 }
1210
52baf119 1211 mutex_lock(&ftrace_lock);
64e7c440
SR
1212 do_for_each_ftrace_rec(pg, rec) {
1213
1214 if (rec->flags & FTRACE_FL_FAILED)
1215 continue;
1216
1217 if (ftrace_match_module_record(rec, mod,
1218 search, search_len, type)) {
1219 if (not)
1220 rec->flags &= ~flag;
1221 else
1222 rec->flags |= flag;
1223 }
e68746a2
SR
1224 if (enable && (rec->flags & FTRACE_FL_FILTER))
1225 ftrace_filtered = 1;
64e7c440
SR
1226
1227 } while_for_each_ftrace_rec();
52baf119 1228 mutex_unlock(&ftrace_lock);
64e7c440
SR
1229}
1230
f6180773
SR
1231/*
1232 * We register the module command as a template to show others how
1233 * to register the a command as well.
1234 */
1235
1236static int
1237ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1238{
1239 char *mod;
1240
1241 /*
1242 * cmd == 'mod' because we only registered this func
1243 * for the 'mod' ftrace_func_command.
1244 * But if you register one func with multiple commands,
1245 * you can tell which command was used by the cmd
1246 * parameter.
1247 */
1248
1249 /* we must have a module name */
1250 if (!param)
1251 return -EINVAL;
1252
1253 mod = strsep(&param, ":");
1254 if (!strlen(mod))
1255 return -EINVAL;
1256
1257 ftrace_match_module_records(func, mod, enable);
1258 return 0;
1259}
1260
1261static struct ftrace_func_command ftrace_mod_cmd = {
1262 .name = "mod",
1263 .func = ftrace_mod_callback,
1264};
1265
1266static int __init ftrace_mod_cmd_init(void)
1267{
1268 return register_ftrace_command(&ftrace_mod_cmd);
1269}
1270device_initcall(ftrace_mod_cmd_init);
1271
1272static LIST_HEAD(ftrace_commands);
1273static DEFINE_MUTEX(ftrace_cmd_mutex);
1274
1275int register_ftrace_command(struct ftrace_func_command *cmd)
1276{
1277 struct ftrace_func_command *p;
1278 int ret = 0;
1279
1280 mutex_lock(&ftrace_cmd_mutex);
1281 list_for_each_entry(p, &ftrace_commands, list) {
1282 if (strcmp(cmd->name, p->name) == 0) {
1283 ret = -EBUSY;
1284 goto out_unlock;
1285 }
1286 }
1287 list_add(&cmd->list, &ftrace_commands);
1288 out_unlock:
1289 mutex_unlock(&ftrace_cmd_mutex);
1290
1291 return ret;
1292}
1293
1294int unregister_ftrace_command(struct ftrace_func_command *cmd)
1295{
1296 struct ftrace_func_command *p, *n;
1297 int ret = -ENODEV;
1298
1299 mutex_lock(&ftrace_cmd_mutex);
1300 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1301 if (strcmp(cmd->name, p->name) == 0) {
1302 ret = 0;
1303 list_del_init(&p->list);
1304 goto out_unlock;
1305 }
1306 }
1307 out_unlock:
1308 mutex_unlock(&ftrace_cmd_mutex);
1309
1310 return ret;
1311}
1312
64e7c440
SR
1313static int ftrace_process_regex(char *buff, int len, int enable)
1314{
f6180773
SR
1315 struct ftrace_func_command *p;
1316 char *func, *command, *next = buff;
1317 int ret = -EINVAL;
64e7c440
SR
1318
1319 func = strsep(&next, ":");
1320
1321 if (!next) {
1322 ftrace_match_records(func, len, enable);
1323 return 0;
1324 }
1325
f6180773 1326 /* command found */
64e7c440
SR
1327
1328 command = strsep(&next, ":");
1329
f6180773
SR
1330 mutex_lock(&ftrace_cmd_mutex);
1331 list_for_each_entry(p, &ftrace_commands, list) {
1332 if (strcmp(p->name, command) == 0) {
1333 ret = p->func(func, command, next, enable);
1334 goto out_unlock;
1335 }
64e7c440 1336 }
f6180773
SR
1337 out_unlock:
1338 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 1339
f6180773 1340 return ret;
64e7c440
SR
1341}
1342
e309b41d 1343static ssize_t
41c52c0d
SR
1344ftrace_regex_write(struct file *file, const char __user *ubuf,
1345 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1346{
1347 struct ftrace_iterator *iter;
1348 char ch;
1349 size_t read = 0;
1350 ssize_t ret;
1351
1352 if (!cnt || cnt < 0)
1353 return 0;
1354
41c52c0d 1355 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1356
1357 if (file->f_mode & FMODE_READ) {
1358 struct seq_file *m = file->private_data;
1359 iter = m->private;
1360 } else
1361 iter = file->private_data;
1362
1363 if (!*ppos) {
1364 iter->flags &= ~FTRACE_ITER_CONT;
1365 iter->buffer_idx = 0;
1366 }
1367
1368 ret = get_user(ch, ubuf++);
1369 if (ret)
1370 goto out;
1371 read++;
1372 cnt--;
1373
1374 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1375 /* skip white space */
1376 while (cnt && isspace(ch)) {
1377 ret = get_user(ch, ubuf++);
1378 if (ret)
1379 goto out;
1380 read++;
1381 cnt--;
1382 }
1383
5072c59f
SR
1384 if (isspace(ch)) {
1385 file->f_pos += read;
1386 ret = read;
1387 goto out;
1388 }
1389
1390 iter->buffer_idx = 0;
1391 }
1392
1393 while (cnt && !isspace(ch)) {
1394 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1395 iter->buffer[iter->buffer_idx++] = ch;
1396 else {
1397 ret = -EINVAL;
1398 goto out;
1399 }
1400 ret = get_user(ch, ubuf++);
1401 if (ret)
1402 goto out;
1403 read++;
1404 cnt--;
1405 }
1406
1407 if (isspace(ch)) {
1408 iter->filtered++;
1409 iter->buffer[iter->buffer_idx] = 0;
64e7c440
SR
1410 ret = ftrace_process_regex(iter->buffer,
1411 iter->buffer_idx, enable);
1412 if (ret)
1413 goto out;
5072c59f
SR
1414 iter->buffer_idx = 0;
1415 } else
1416 iter->flags |= FTRACE_ITER_CONT;
1417
1418
1419 file->f_pos += read;
1420
1421 ret = read;
1422 out:
41c52c0d 1423 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1424
1425 return ret;
1426}
1427
41c52c0d
SR
1428static ssize_t
1429ftrace_filter_write(struct file *file, const char __user *ubuf,
1430 size_t cnt, loff_t *ppos)
1431{
1432 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1433}
1434
1435static ssize_t
1436ftrace_notrace_write(struct file *file, const char __user *ubuf,
1437 size_t cnt, loff_t *ppos)
1438{
1439 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1440}
1441
1442static void
1443ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1444{
1445 if (unlikely(ftrace_disabled))
1446 return;
1447
1448 mutex_lock(&ftrace_regex_lock);
1449 if (reset)
1450 ftrace_filter_reset(enable);
1451 if (buf)
7f24b31b 1452 ftrace_match_records(buf, len, enable);
41c52c0d
SR
1453 mutex_unlock(&ftrace_regex_lock);
1454}
1455
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1484
e309b41d 1485static int
41c52c0d 1486ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1487{
1488 struct seq_file *m = (struct seq_file *)file->private_data;
1489 struct ftrace_iterator *iter;
1490
41c52c0d 1491 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1492 if (file->f_mode & FMODE_READ) {
1493 iter = m->private;
1494
1495 seq_release(inode, file);
1496 } else
1497 iter = file->private_data;
1498
1499 if (iter->buffer_idx) {
1500 iter->filtered++;
1501 iter->buffer[iter->buffer_idx] = 0;
7f24b31b 1502 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1503 }
1504
1505 mutex_lock(&ftrace_sysctl_lock);
cb7be3b2 1506 mutex_lock(&ftrace_start_lock);
ee02a2e5 1507 if (ftrace_start_up && ftrace_enabled)
5072c59f 1508 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
cb7be3b2 1509 mutex_unlock(&ftrace_start_lock);
5072c59f
SR
1510 mutex_unlock(&ftrace_sysctl_lock);
1511
1512 kfree(iter);
41c52c0d 1513 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1514 return 0;
1515}
1516
/* release for set_ftrace_filter */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

/* release for set_ftrace_notrace */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1528
5072c59f
SR
1529static struct file_operations ftrace_avail_fops = {
1530 .open = ftrace_avail_open,
1531 .read = seq_read,
1532 .llseek = seq_lseek,
1533 .release = ftrace_avail_release,
1534};
1535
eb9a7bf0
AS
1536static struct file_operations ftrace_failures_fops = {
1537 .open = ftrace_failures_open,
1538 .read = seq_read,
1539 .llseek = seq_lseek,
1540 .release = ftrace_avail_release,
1541};
1542
5072c59f
SR
1543static struct file_operations ftrace_filter_fops = {
1544 .open = ftrace_filter_open,
41c52c0d 1545 .read = ftrace_regex_read,
5072c59f 1546 .write = ftrace_filter_write,
41c52c0d 1547 .llseek = ftrace_regex_lseek,
5072c59f
SR
1548 .release = ftrace_filter_release,
1549};
1550
41c52c0d
SR
1551static struct file_operations ftrace_notrace_fops = {
1552 .open = ftrace_notrace_open,
1553 .read = ftrace_regex_read,
1554 .write = ftrace_notrace_write,
1555 .llseek = ftrace_regex_lseek,
1556 .release = ftrace_notrace_release,
1557};
1558
ea4e2bc4
SR
1559#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1560
1561static DEFINE_MUTEX(graph_lock);
1562
1563int ftrace_graph_count;
1564unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1565
1566static void *
1567g_next(struct seq_file *m, void *v, loff_t *pos)
1568{
1569 unsigned long *array = m->private;
1570 int index = *pos;
1571
1572 (*pos)++;
1573
1574 if (index >= ftrace_graph_count)
1575 return NULL;
1576
1577 return &array[index];
1578}
1579
1580static void *g_start(struct seq_file *m, loff_t *pos)
1581{
1582 void *p = NULL;
1583
1584 mutex_lock(&graph_lock);
1585
1586 p = g_next(m, p, pos);
1587
1588 return p;
1589}
1590
1591static void g_stop(struct seq_file *m, void *p)
1592{
1593 mutex_unlock(&graph_lock);
1594}
1595
1596static int g_show(struct seq_file *m, void *v)
1597{
1598 unsigned long *ptr = v;
1599 char str[KSYM_SYMBOL_LEN];
1600
1601 if (!ptr)
1602 return 0;
1603
1604 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1605
1606 seq_printf(m, "%s\n", str);
1607
1608 return 0;
1609}
1610
1611static struct seq_operations ftrace_graph_seq_ops = {
1612 .start = g_start,
1613 .next = g_next,
1614 .stop = g_stop,
1615 .show = g_show,
1616};
1617
1618static int
1619ftrace_graph_open(struct inode *inode, struct file *file)
1620{
1621 int ret = 0;
1622
1623 if (unlikely(ftrace_disabled))
1624 return -ENODEV;
1625
1626 mutex_lock(&graph_lock);
1627 if ((file->f_mode & FMODE_WRITE) &&
1628 !(file->f_flags & O_APPEND)) {
1629 ftrace_graph_count = 0;
1630 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1631 }
1632
1633 if (file->f_mode & FMODE_READ) {
1634 ret = seq_open(file, &ftrace_graph_seq_ops);
1635 if (!ret) {
1636 struct seq_file *m = file->private_data;
1637 m->private = ftrace_graph_funcs;
1638 }
1639 } else
1640 file->private_data = ftrace_graph_funcs;
1641 mutex_unlock(&graph_lock);
1642
1643 return ret;
1644}
1645
1646static ssize_t
1647ftrace_graph_read(struct file *file, char __user *ubuf,
1648 size_t cnt, loff_t *ppos)
1649{
1650 if (file->f_mode & FMODE_READ)
1651 return seq_read(file, ubuf, cnt, ppos);
1652 else
1653 return -EPERM;
1654}
1655
1656static int
1657ftrace_set_func(unsigned long *array, int idx, char *buffer)
1658{
1659 char str[KSYM_SYMBOL_LEN];
1660 struct dyn_ftrace *rec;
1661 struct ftrace_page *pg;
1662 int found = 0;
265c831c 1663 int j;
ea4e2bc4
SR
1664
1665 if (ftrace_disabled)
1666 return -ENODEV;
1667
52baf119 1668 mutex_lock(&ftrace_lock);
265c831c
SR
1669 do_for_each_ftrace_rec(pg, rec) {
1670
1671 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1672 continue;
1673
1674 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1675 if (strcmp(str, buffer) == 0) {
1676 /* Return 1 if we add it to the array */
1677 found = 1;
1678 for (j = 0; j < idx; j++)
1679 if (array[j] == rec->ip) {
1680 found = 0;
1681 break;
1682 }
1683 if (found)
1684 array[idx] = rec->ip;
1685 goto out;
ea4e2bc4 1686 }
265c831c
SR
1687 } while_for_each_ftrace_rec();
1688 out:
52baf119 1689 mutex_unlock(&ftrace_lock);
ea4e2bc4
SR
1690
1691 return found ? 0 : -EINVAL;
1692}
1693
1694static ssize_t
1695ftrace_graph_write(struct file *file, const char __user *ubuf,
1696 size_t cnt, loff_t *ppos)
1697{
1698 unsigned char buffer[FTRACE_BUFF_MAX+1];
1699 unsigned long *array;
1700 size_t read = 0;
1701 ssize_t ret;
1702 int index = 0;
1703 char ch;
1704
1705 if (!cnt || cnt < 0)
1706 return 0;
1707
1708 mutex_lock(&graph_lock);
1709
1710 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1711 ret = -EBUSY;
1712 goto out;
1713 }
1714
1715 if (file->f_mode & FMODE_READ) {
1716 struct seq_file *m = file->private_data;
1717 array = m->private;
1718 } else
1719 array = file->private_data;
1720
1721 ret = get_user(ch, ubuf++);
1722 if (ret)
1723 goto out;
1724 read++;
1725 cnt--;
1726
1727 /* skip white space */
1728 while (cnt && isspace(ch)) {
1729 ret = get_user(ch, ubuf++);
1730 if (ret)
1731 goto out;
1732 read++;
1733 cnt--;
1734 }
1735
1736 if (isspace(ch)) {
1737 *ppos += read;
1738 ret = read;
1739 goto out;
1740 }
1741
1742 while (cnt && !isspace(ch)) {
1743 if (index < FTRACE_BUFF_MAX)
1744 buffer[index++] = ch;
1745 else {
1746 ret = -EINVAL;
1747 goto out;
1748 }
1749 ret = get_user(ch, ubuf++);
1750 if (ret)
1751 goto out;
1752 read++;
1753 cnt--;
1754 }
1755 buffer[index] = 0;
1756
1757 /* we allow only one at a time */
1758 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1759 if (ret)
1760 goto out;
1761
1762 ftrace_graph_count++;
1763
1764 file->f_pos += read;
1765
1766 ret = read;
1767 out:
1768 mutex_unlock(&graph_lock);
1769
1770 return ret;
1771}
1772
1773static const struct file_operations ftrace_graph_fops = {
1774 .open = ftrace_graph_open,
1775 .read = ftrace_graph_read,
1776 .write = ftrace_graph_write,
1777};
1778#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1779
df4fc315 1780static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 1781{
5072c59f
SR
1782 struct dentry *entry;
1783
5072c59f
SR
1784 entry = debugfs_create_file("available_filter_functions", 0444,
1785 d_tracer, NULL, &ftrace_avail_fops);
1786 if (!entry)
1787 pr_warning("Could not create debugfs "
1788 "'available_filter_functions' entry\n");
1789
eb9a7bf0
AS
1790 entry = debugfs_create_file("failures", 0444,
1791 d_tracer, NULL, &ftrace_failures_fops);
1792 if (!entry)
1793 pr_warning("Could not create debugfs 'failures' entry\n");
1794
5072c59f
SR
1795 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1796 NULL, &ftrace_filter_fops);
1797 if (!entry)
1798 pr_warning("Could not create debugfs "
1799 "'set_ftrace_filter' entry\n");
41c52c0d
SR
1800
1801 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1802 NULL, &ftrace_notrace_fops);
1803 if (!entry)
1804 pr_warning("Could not create debugfs "
1805 "'set_ftrace_notrace' entry\n");
ad90c0e3 1806
ea4e2bc4
SR
1807#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1808 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1809 NULL,
1810 &ftrace_graph_fops);
1811 if (!entry)
1812 pr_warning("Could not create debugfs "
1813 "'set_graph_function' entry\n");
1814#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1815
5072c59f
SR
1816 return 0;
1817}
1818
31e88909
SR
1819static int ftrace_convert_nops(struct module *mod,
1820 unsigned long *start,
68bf21aa
SR
1821 unsigned long *end)
1822{
1823 unsigned long *p;
1824 unsigned long addr;
1825 unsigned long flags;
1826
08f5ac90 1827 mutex_lock(&ftrace_start_lock);
68bf21aa
SR
1828 p = start;
1829 while (p < end) {
1830 addr = ftrace_call_adjust(*p++);
20e5227e
SR
1831 /*
1832 * Some architecture linkers will pad between
1833 * the different mcount_loc sections of different
1834 * object files to satisfy alignments.
1835 * Skip any NULL pointers.
1836 */
1837 if (!addr)
1838 continue;
68bf21aa 1839 ftrace_record_ip(addr);
68bf21aa
SR
1840 }
1841
08f5ac90 1842 /* disable interrupts to prevent kstop machine */
68bf21aa 1843 local_irq_save(flags);
31e88909 1844 ftrace_update_code(mod);
68bf21aa 1845 local_irq_restore(flags);
08f5ac90 1846 mutex_unlock(&ftrace_start_lock);
68bf21aa
SR
1847
1848 return 0;
1849}
1850
31e88909
SR
1851void ftrace_init_module(struct module *mod,
1852 unsigned long *start, unsigned long *end)
90d595fe 1853{
00fd61ae 1854 if (ftrace_disabled || start == end)
fed1939c 1855 return;
31e88909 1856 ftrace_convert_nops(mod, start, end);
90d595fe
SR
1857}
1858
68bf21aa
SR
1859extern unsigned long __start_mcount_loc[];
1860extern unsigned long __stop_mcount_loc[];
1861
1862void __init ftrace_init(void)
1863{
1864 unsigned long count, addr, flags;
1865 int ret;
1866
1867 /* Keep the ftrace pointer to the stub */
1868 addr = (unsigned long)ftrace_stub;
1869
1870 local_irq_save(flags);
1871 ftrace_dyn_arch_init(&addr);
1872 local_irq_restore(flags);
1873
1874 /* ftrace_dyn_arch_init places the return code in addr */
1875 if (addr)
1876 goto failed;
1877
1878 count = __stop_mcount_loc - __start_mcount_loc;
1879
1880 ret = ftrace_dyn_table_alloc(count);
1881 if (ret)
1882 goto failed;
1883
1884 last_ftrace_enabled = ftrace_enabled = 1;
1885
31e88909
SR
1886 ret = ftrace_convert_nops(NULL,
1887 __start_mcount_loc,
68bf21aa
SR
1888 __stop_mcount_loc);
1889
1890 return;
1891 failed:
1892 ftrace_disabled = 1;
1893}
68bf21aa 1894
3d083395 1895#else
0b6e4d56
FW
1896
1897static int __init ftrace_nodyn_init(void)
1898{
1899 ftrace_enabled = 1;
1900 return 0;
1901}
1902device_initcall(ftrace_nodyn_init);
1903
df4fc315
SR
1904static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1905static inline void ftrace_startup_enable(int command) { }
5a45cfe1
SR
1906/* Keep as macros so we do not need to define the commands */
1907# define ftrace_startup(command) do { } while (0)
1908# define ftrace_shutdown(command) do { } while (0)
c7aafc54
IM
1909# define ftrace_startup_sysctl() do { } while (0)
1910# define ftrace_shutdown_sysctl() do { } while (0)
3d083395
SR
1911#endif /* CONFIG_DYNAMIC_FTRACE */
1912
df4fc315
SR
1913static ssize_t
1914ftrace_pid_read(struct file *file, char __user *ubuf,
1915 size_t cnt, loff_t *ppos)
1916{
1917 char buf[64];
1918 int r;
1919
e32d8956
SR
1920 if (ftrace_pid_trace == ftrace_swapper_pid)
1921 r = sprintf(buf, "swapper tasks\n");
1922 else if (ftrace_pid_trace)
978f3a45 1923 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
df4fc315
SR
1924 else
1925 r = sprintf(buf, "no pid\n");
1926
1927 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1928}
1929
e32d8956 1930static void clear_ftrace_swapper(void)
978f3a45
SR
1931{
1932 struct task_struct *p;
e32d8956 1933 int cpu;
978f3a45 1934
e32d8956
SR
1935 get_online_cpus();
1936 for_each_online_cpu(cpu) {
1937 p = idle_task(cpu);
978f3a45 1938 clear_tsk_trace_trace(p);
e32d8956
SR
1939 }
1940 put_online_cpus();
1941}
978f3a45 1942
e32d8956
SR
1943static void set_ftrace_swapper(void)
1944{
1945 struct task_struct *p;
1946 int cpu;
1947
1948 get_online_cpus();
1949 for_each_online_cpu(cpu) {
1950 p = idle_task(cpu);
1951 set_tsk_trace_trace(p);
1952 }
1953 put_online_cpus();
978f3a45
SR
1954}
1955
e32d8956
SR
1956static void clear_ftrace_pid(struct pid *pid)
1957{
1958 struct task_struct *p;
1959
229c4ef8 1960 rcu_read_lock();
e32d8956
SR
1961 do_each_pid_task(pid, PIDTYPE_PID, p) {
1962 clear_tsk_trace_trace(p);
1963 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
1964 rcu_read_unlock();
1965
e32d8956
SR
1966 put_pid(pid);
1967}
1968
1969static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
1970{
1971 struct task_struct *p;
1972
229c4ef8 1973 rcu_read_lock();
978f3a45
SR
1974 do_each_pid_task(pid, PIDTYPE_PID, p) {
1975 set_tsk_trace_trace(p);
1976 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 1977 rcu_read_unlock();
978f3a45
SR
1978}
1979
e32d8956
SR
1980static void clear_ftrace_pid_task(struct pid **pid)
1981{
1982 if (*pid == ftrace_swapper_pid)
1983 clear_ftrace_swapper();
1984 else
1985 clear_ftrace_pid(*pid);
1986
1987 *pid = NULL;
1988}
1989
1990static void set_ftrace_pid_task(struct pid *pid)
1991{
1992 if (pid == ftrace_swapper_pid)
1993 set_ftrace_swapper();
1994 else
1995 set_ftrace_pid(pid);
1996}
1997
df4fc315
SR
1998static ssize_t
1999ftrace_pid_write(struct file *filp, const char __user *ubuf,
2000 size_t cnt, loff_t *ppos)
2001{
978f3a45 2002 struct pid *pid;
df4fc315
SR
2003 char buf[64];
2004 long val;
2005 int ret;
2006
2007 if (cnt >= sizeof(buf))
2008 return -EINVAL;
2009
2010 if (copy_from_user(&buf, ubuf, cnt))
2011 return -EFAULT;
2012
2013 buf[cnt] = 0;
2014
2015 ret = strict_strtol(buf, 10, &val);
2016 if (ret < 0)
2017 return ret;
2018
2019 mutex_lock(&ftrace_start_lock);
978f3a45 2020 if (val < 0) {
df4fc315 2021 /* disable pid tracing */
978f3a45 2022 if (!ftrace_pid_trace)
df4fc315 2023 goto out;
978f3a45
SR
2024
2025 clear_ftrace_pid_task(&ftrace_pid_trace);
df4fc315
SR
2026
2027 } else {
e32d8956
SR
2028 /* swapper task is special */
2029 if (!val) {
2030 pid = ftrace_swapper_pid;
2031 if (pid == ftrace_pid_trace)
2032 goto out;
2033 } else {
2034 pid = find_get_pid(val);
df4fc315 2035
e32d8956
SR
2036 if (pid == ftrace_pid_trace) {
2037 put_pid(pid);
2038 goto out;
2039 }
0ef8cde5 2040 }
0ef8cde5 2041
978f3a45
SR
2042 if (ftrace_pid_trace)
2043 clear_ftrace_pid_task(&ftrace_pid_trace);
2044
2045 if (!pid)
2046 goto out;
2047
2048 ftrace_pid_trace = pid;
2049
2050 set_ftrace_pid_task(ftrace_pid_trace);
df4fc315
SR
2051 }
2052
2053 /* update the function call */
2054 ftrace_update_pid_func();
2055 ftrace_startup_enable(0);
2056
2057 out:
2058 mutex_unlock(&ftrace_start_lock);
2059
2060 return cnt;
2061}
2062
2063static struct file_operations ftrace_pid_fops = {
2064 .read = ftrace_pid_read,
2065 .write = ftrace_pid_write,
2066};
2067
2068static __init int ftrace_init_debugfs(void)
2069{
2070 struct dentry *d_tracer;
2071 struct dentry *entry;
2072
2073 d_tracer = tracing_init_dentry();
2074 if (!d_tracer)
2075 return 0;
2076
2077 ftrace_init_dyn_debugfs(d_tracer);
2078
2079 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2080 NULL, &ftrace_pid_fops);
2081 if (!entry)
2082 pr_warning("Could not create debugfs "
2083 "'set_ftrace_pid' entry\n");
2084 return 0;
2085}
2086
2087fs_initcall(ftrace_init_debugfs);
2088
a2bb6a3d 2089/**
81adbdc0 2090 * ftrace_kill - kill ftrace
a2bb6a3d
SR
2091 *
2092 * This function should be used by panic code. It stops ftrace
2093 * but in a not so nice way. If you need to simply kill ftrace
2094 * from a non-atomic section, use ftrace_kill.
2095 */
81adbdc0 2096void ftrace_kill(void)
a2bb6a3d
SR
2097{
2098 ftrace_disabled = 1;
2099 ftrace_enabled = 0;
a2bb6a3d
SR
2100 clear_ftrace_function();
2101}
2102
16444a8a 2103/**
3d083395
SR
2104 * register_ftrace_function - register a function for profiling
2105 * @ops - ops structure that holds the function for profiling.
16444a8a 2106 *
3d083395
SR
2107 * Register a function to be called by all functions in the
2108 * kernel.
2109 *
2110 * Note: @ops->func and all the functions it calls must be labeled
2111 * with "notrace", otherwise it will go into a
2112 * recursive loop.
16444a8a 2113 */
3d083395 2114int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 2115{
b0fc494f
SR
2116 int ret;
2117
4eebcc81
SR
2118 if (unlikely(ftrace_disabled))
2119 return -1;
2120
b0fc494f 2121 mutex_lock(&ftrace_sysctl_lock);
e7d3737e 2122
b0fc494f 2123 ret = __register_ftrace_function(ops);
5a45cfe1 2124 ftrace_startup(0);
b0fc494f 2125
e7d3737e 2126 mutex_unlock(&ftrace_sysctl_lock);
b0fc494f 2127 return ret;
3d083395
SR
2128}
2129
2130/**
32632920 2131 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
2132 * @ops - ops structure that holds the function to unregister
2133 *
2134 * Unregister a function that was added to be called by ftrace profiling.
2135 */
2136int unregister_ftrace_function(struct ftrace_ops *ops)
2137{
2138 int ret;
2139
b0fc494f 2140 mutex_lock(&ftrace_sysctl_lock);
3d083395 2141 ret = __unregister_ftrace_function(ops);
5a45cfe1 2142 ftrace_shutdown(0);
b0fc494f
SR
2143 mutex_unlock(&ftrace_sysctl_lock);
2144
2145 return ret;
2146}
2147
e309b41d 2148int
b0fc494f 2149ftrace_enable_sysctl(struct ctl_table *table, int write,
5072c59f 2150 struct file *file, void __user *buffer, size_t *lenp,
b0fc494f
SR
2151 loff_t *ppos)
2152{
2153 int ret;
2154
4eebcc81
SR
2155 if (unlikely(ftrace_disabled))
2156 return -ENODEV;
2157
b0fc494f
SR
2158 mutex_lock(&ftrace_sysctl_lock);
2159
5072c59f 2160 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
b0fc494f
SR
2161
2162 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2163 goto out;
2164
2165 last_ftrace_enabled = ftrace_enabled;
2166
2167 if (ftrace_enabled) {
2168
2169 ftrace_startup_sysctl();
2170
2171 /* we are starting ftrace again */
2172 if (ftrace_list != &ftrace_list_end) {
2173 if (ftrace_list->next == &ftrace_list_end)
2174 ftrace_trace_function = ftrace_list->func;
2175 else
2176 ftrace_trace_function = ftrace_list_func;
2177 }
2178
2179 } else {
2180 /* stopping ftrace calls (just send to ftrace_stub) */
2181 ftrace_trace_function = ftrace_stub;
2182
2183 ftrace_shutdown_sysctl();
2184 }
2185
2186 out:
2187 mutex_unlock(&ftrace_sysctl_lock);
3d083395 2188 return ret;
16444a8a 2189}
f17845e5 2190
fb52607a 2191#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 2192
287b6e68 2193static atomic_t ftrace_graph_active;
4a2b8dda 2194static struct notifier_block ftrace_suspend_notifier;
e7d3737e 2195
/* default entry hook: record nothing */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}
2200
287b6e68
FW
2201/* The callbacks that hook a function */
2202trace_func_graph_ret_t ftrace_graph_return =
2203 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 2204trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
2205
2206/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2207static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2208{
2209 int i;
2210 int ret = 0;
2211 unsigned long flags;
2212 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2213 struct task_struct *g, *t;
2214
2215 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2216 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2217 * sizeof(struct ftrace_ret_stack),
2218 GFP_KERNEL);
2219 if (!ret_stack_list[i]) {
2220 start = 0;
2221 end = i;
2222 ret = -ENOMEM;
2223 goto free;
2224 }
2225 }
2226
2227 read_lock_irqsave(&tasklist_lock, flags);
2228 do_each_thread(g, t) {
2229 if (start == end) {
2230 ret = -EAGAIN;
2231 goto unlock;
2232 }
2233
2234 if (t->ret_stack == NULL) {
f201ae23 2235 t->curr_ret_stack = -1;
48d68b20
FW
2236 /* Make sure IRQs see the -1 first: */
2237 barrier();
2238 t->ret_stack = ret_stack_list[start++];
380c4b14 2239 atomic_set(&t->tracing_graph_pause, 0);
f201ae23
FW
2240 atomic_set(&t->trace_overrun, 0);
2241 }
2242 } while_each_thread(g, t);
2243
2244unlock:
2245 read_unlock_irqrestore(&tasklist_lock, flags);
2246free:
2247 for (i = start; i < end; i++)
2248 kfree(ret_stack_list[i]);
2249 return ret;
2250}
2251
2252/* Allocate a return stack for each task */
fb52607a 2253static int start_graph_tracing(void)
f201ae23
FW
2254{
2255 struct ftrace_ret_stack **ret_stack_list;
2256 int ret;
2257
2258 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2259 sizeof(struct ftrace_ret_stack *),
2260 GFP_KERNEL);
2261
2262 if (!ret_stack_list)
2263 return -ENOMEM;
2264
2265 do {
2266 ret = alloc_retstack_tasklist(ret_stack_list);
2267 } while (ret == -EAGAIN);
2268
2269 kfree(ret_stack_list);
2270 return ret;
2271}
2272
4a2b8dda
FW
2273/*
2274 * Hibernation protection.
2275 * The state of the current task is too much unstable during
2276 * suspend/restore to disk. We want to protect against that.
2277 */
2278static int
2279ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2280 void *unused)
2281{
2282 switch (state) {
2283 case PM_HIBERNATION_PREPARE:
2284 pause_graph_tracing();
2285 break;
2286
2287 case PM_POST_HIBERNATION:
2288 unpause_graph_tracing();
2289 break;
2290 }
2291 return NOTIFY_DONE;
2292}
2293
287b6e68
FW
2294int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2295 trace_func_graph_ent_t entryfunc)
15e6cb36 2296{
e7d3737e
FW
2297 int ret = 0;
2298
2299 mutex_lock(&ftrace_sysctl_lock);
2300
4a2b8dda
FW
2301 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2302 register_pm_notifier(&ftrace_suspend_notifier);
2303
287b6e68 2304 atomic_inc(&ftrace_graph_active);
fb52607a 2305 ret = start_graph_tracing();
f201ae23 2306 if (ret) {
287b6e68 2307 atomic_dec(&ftrace_graph_active);
f201ae23
FW
2308 goto out;
2309 }
e53a6319 2310
287b6e68
FW
2311 ftrace_graph_return = retfunc;
2312 ftrace_graph_entry = entryfunc;
e53a6319 2313
5a45cfe1 2314 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
2315
2316out:
2317 mutex_unlock(&ftrace_sysctl_lock);
2318 return ret;
15e6cb36
FW
2319}
2320
fb52607a 2321void unregister_ftrace_graph(void)
15e6cb36 2322{
e7d3737e
FW
2323 mutex_lock(&ftrace_sysctl_lock);
2324
287b6e68
FW
2325 atomic_dec(&ftrace_graph_active);
2326 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 2327 ftrace_graph_entry = ftrace_graph_entry_stub;
5a45cfe1 2328 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
4a2b8dda 2329 unregister_pm_notifier(&ftrace_suspend_notifier);
e7d3737e
FW
2330
2331 mutex_unlock(&ftrace_sysctl_lock);
15e6cb36 2332}
f201ae23
FW
2333
2334/* Allocate a return stack for newly created task */
fb52607a 2335void ftrace_graph_init_task(struct task_struct *t)
f201ae23 2336{
287b6e68 2337 if (atomic_read(&ftrace_graph_active)) {
f201ae23
FW
2338 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2339 * sizeof(struct ftrace_ret_stack),
2340 GFP_KERNEL);
2341 if (!t->ret_stack)
2342 return;
2343 t->curr_ret_stack = -1;
380c4b14 2344 atomic_set(&t->tracing_graph_pause, 0);
f201ae23
FW
2345 atomic_set(&t->trace_overrun, 0);
2346 } else
2347 t->ret_stack = NULL;
2348}
2349
fb52607a 2350void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 2351{
eae849ca
FW
2352 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2353
f201ae23 2354 t->ret_stack = NULL;
eae849ca
FW
2355 /* NULL must become visible to IRQs before we free it: */
2356 barrier();
2357
2358 kfree(ret_stack);
f201ae23 2359}
14a866c5
SR
2360
/* Hard-stop function tracing; presumably used on graph-tracer errors
 * - confirm at callers. */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
15e6cb36
FW
2365#endif
2366