]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/trace/ftrace.c
ftrace: consolidate mutexes
[mirror_ubuntu-bionic-kernel.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
f22f9a89 25#include <linux/kprobes.h>
2d8b820b 26#include <linux/ftrace.h>
b0fc494f 27#include <linux/sysctl.h>
5072c59f 28#include <linux/ctype.h>
3d083395
SR
29#include <linux/list.h>
30
395a59d0
AS
31#include <asm/ftrace.h>
32
3d083395 33#include "trace.h"
16444a8a 34
6912896e
SR
/*
 * If an unexpected condition fires, warn once/always and shut ftrace
 * down entirely — a bad text patch is worse than no tracing.
 */
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
46
4eebcc81
SR
47/* ftrace_enabled is a method to turn ftrace on or off */
48int ftrace_enabled __read_mostly;
d61f82d0 49static int last_ftrace_enabled;
b0fc494f 50
0ef8cde5 51/* set when tracing only a pid */
978f3a45 52struct pid *ftrace_pid_trace;
21bbecda 53static struct pid * const ftrace_swapper_pid = &init_struct_pid;
df4fc315 54
60a7ecf4
SR
55/* Quick disabling of function tracer. */
56int function_trace_stop;
57
4eebcc81
SR
58/*
59 * ftrace_disabled is set when an anomaly is discovered.
60 * ftrace_disabled is much stronger than ftrace_enabled.
61 */
62static int ftrace_disabled __read_mostly;
63
52baf119 64static DEFINE_MUTEX(ftrace_lock);
b0fc494f 65
16444a8a
ACM
66static struct ftrace_ops ftrace_list_end __read_mostly =
67{
68 .func = ftrace_stub,
69};
70
71static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
72ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 73ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 74ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 75
f2252935 76static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
77{
78 struct ftrace_ops *op = ftrace_list;
79
80 /* in case someone actually ports this to alpha! */
81 read_barrier_depends();
82
83 while (op != &ftrace_list_end) {
84 /* silly alpha */
85 read_barrier_depends();
86 op->func(ip, parent_ip);
87 op = op->next;
88 };
89}
90
df4fc315
SR
91static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
92{
0ef8cde5 93 if (!test_tsk_trace_trace(current))
df4fc315
SR
94 return;
95
96 ftrace_pid_function(ip, parent_ip);
97}
98
99static void set_ftrace_pid_function(ftrace_func_t func)
100{
101 /* do not set ftrace_pid_function to itself! */
102 if (func != ftrace_pid_func)
103 ftrace_pid_function = func;
104}
105
16444a8a 106/**
3d083395 107 * clear_ftrace_function - reset the ftrace function
16444a8a 108 *
3d083395
SR
109 * This NULLs the ftrace function and in essence stops
110 * tracing. There may be lag
16444a8a 111 */
3d083395 112void clear_ftrace_function(void)
16444a8a 113{
3d083395 114 ftrace_trace_function = ftrace_stub;
60a7ecf4 115 __ftrace_trace_function = ftrace_stub;
df4fc315 116 ftrace_pid_function = ftrace_stub;
3d083395
SR
117}
118
60a7ecf4
SR
119#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
120/*
121 * For those archs that do not test ftrace_trace_stop in their
122 * mcount call site, we need to do it from C.
123 */
124static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
125{
126 if (function_trace_stop)
127 return;
128
129 __ftrace_trace_function(ip, parent_ip);
130}
131#endif
132
e309b41d 133static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 134{
16444a8a
ACM
135 ops->next = ftrace_list;
136 /*
137 * We are entering ops into the ftrace_list but another
138 * CPU might be walking that list. We need to make sure
139 * the ops->next pointer is valid before another CPU sees
140 * the ops pointer included into the ftrace_list.
141 */
142 smp_wmb();
143 ftrace_list = ops;
3d083395 144
b0fc494f 145 if (ftrace_enabled) {
df4fc315
SR
146 ftrace_func_t func;
147
148 if (ops->next == &ftrace_list_end)
149 func = ops->func;
150 else
151 func = ftrace_list_func;
152
978f3a45 153 if (ftrace_pid_trace) {
df4fc315
SR
154 set_ftrace_pid_function(func);
155 func = ftrace_pid_func;
156 }
157
b0fc494f
SR
158 /*
159 * For one func, simply call it directly.
160 * For more than one func, call the chain.
161 */
60a7ecf4 162#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 163 ftrace_trace_function = func;
60a7ecf4 164#else
df4fc315 165 __ftrace_trace_function = func;
60a7ecf4
SR
166 ftrace_trace_function = ftrace_test_stop_func;
167#endif
b0fc494f 168 }
3d083395 169
16444a8a
ACM
170 return 0;
171}
172
e309b41d 173static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 174{
16444a8a 175 struct ftrace_ops **p;
16444a8a
ACM
176
177 /*
3d083395
SR
178 * If we are removing the last function, then simply point
179 * to the ftrace_stub.
16444a8a
ACM
180 */
181 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
182 ftrace_trace_function = ftrace_stub;
183 ftrace_list = &ftrace_list_end;
e6ea44e9 184 return 0;
16444a8a
ACM
185 }
186
187 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
188 if (*p == ops)
189 break;
190
e6ea44e9
SR
191 if (*p != ops)
192 return -1;
16444a8a
ACM
193
194 *p = (*p)->next;
195
b0fc494f
SR
196 if (ftrace_enabled) {
197 /* If we only have one func left, then call that directly */
df4fc315
SR
198 if (ftrace_list->next == &ftrace_list_end) {
199 ftrace_func_t func = ftrace_list->func;
200
978f3a45 201 if (ftrace_pid_trace) {
df4fc315
SR
202 set_ftrace_pid_function(func);
203 func = ftrace_pid_func;
204 }
205#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
206 ftrace_trace_function = func;
207#else
208 __ftrace_trace_function = func;
209#endif
210 }
b0fc494f 211 }
16444a8a 212
e6ea44e9 213 return 0;
3d083395
SR
214}
215
df4fc315
SR
216static void ftrace_update_pid_func(void)
217{
218 ftrace_func_t func;
219
52baf119 220 mutex_lock(&ftrace_lock);
df4fc315
SR
221
222 if (ftrace_trace_function == ftrace_stub)
223 goto out;
224
225 func = ftrace_trace_function;
226
978f3a45 227 if (ftrace_pid_trace) {
df4fc315
SR
228 set_ftrace_pid_function(func);
229 func = ftrace_pid_func;
230 } else {
66eafebc
LW
231 if (func == ftrace_pid_func)
232 func = ftrace_pid_function;
df4fc315
SR
233 }
234
235#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
236 ftrace_trace_function = func;
237#else
238 __ftrace_trace_function = func;
239#endif
240
241 out:
52baf119 242 mutex_unlock(&ftrace_lock);
df4fc315
SR
243}
244
3d083395 245#ifdef CONFIG_DYNAMIC_FTRACE
99ecdc43 246#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 247# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
248#endif
249
d61f82d0
SR
250enum {
251 FTRACE_ENABLE_CALLS = (1 << 0),
252 FTRACE_DISABLE_CALLS = (1 << 1),
253 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
254 FTRACE_ENABLE_MCOUNT = (1 << 3),
255 FTRACE_DISABLE_MCOUNT = (1 << 4),
5a45cfe1
SR
256 FTRACE_START_FUNC_RET = (1 << 5),
257 FTRACE_STOP_FUNC_RET = (1 << 6),
d61f82d0
SR
258};
259
5072c59f
SR
260static int ftrace_filtered;
261
08f5ac90 262static LIST_HEAD(ftrace_new_addrs);
3d083395 263
41c52c0d 264static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 265
3c1720f0
SR
266struct ftrace_page {
267 struct ftrace_page *next;
431aa3fb 268 int index;
3c1720f0 269 struct dyn_ftrace records[];
aa5e5cea 270};
3c1720f0
SR
271
272#define ENTRIES_PER_PAGE \
273 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
274
275/* estimate from running different kernels */
276#define NR_TO_INIT 10000
277
278static struct ftrace_page *ftrace_pages_start;
279static struct ftrace_page *ftrace_pages;
280
37ad5084
SR
281static struct dyn_ftrace *ftrace_free_records;
282
265c831c
SR
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()	\
		}			\
	}
ecea656d
AS
296
#ifdef CONFIG_KPROBES

/* count of records currently frozen because a kprobe owns the site */
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN)
		return;
	rec->flags |= FTRACE_FL_FROZEN;
	frozen_record_count++;
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN))
		return;
	rec->flags &= ~FTRACE_FL_FROZEN;
	frozen_record_count--;
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
326
e309b41d 327static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 328{
37ad5084
SR
329 rec->ip = (unsigned long)ftrace_free_records;
330 ftrace_free_records = rec;
331 rec->flags |= FTRACE_FL_FREE;
332}
333
fed1939c
SR
334void ftrace_release(void *start, unsigned long size)
335{
336 struct dyn_ftrace *rec;
337 struct ftrace_page *pg;
338 unsigned long s = (unsigned long)start;
339 unsigned long e = s + size;
fed1939c 340
00fd61ae 341 if (ftrace_disabled || !start)
fed1939c
SR
342 return;
343
52baf119 344 mutex_lock(&ftrace_lock);
265c831c
SR
345 do_for_each_ftrace_rec(pg, rec) {
346 if ((rec->ip >= s) && (rec->ip < e))
347 ftrace_free_rec(rec);
348 } while_for_each_ftrace_rec();
52baf119 349 mutex_unlock(&ftrace_lock);
fed1939c
SR
350}
351
e309b41d 352static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 353{
37ad5084
SR
354 struct dyn_ftrace *rec;
355
356 /* First check for freed records */
357 if (ftrace_free_records) {
358 rec = ftrace_free_records;
359
37ad5084 360 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 361 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
362 ftrace_free_records = NULL;
363 return NULL;
364 }
365
366 ftrace_free_records = (void *)rec->ip;
367 memset(rec, 0, sizeof(*rec));
368 return rec;
369 }
370
3c1720f0 371 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
372 if (!ftrace_pages->next) {
373 /* allocate another page */
374 ftrace_pages->next =
375 (void *)get_zeroed_page(GFP_KERNEL);
376 if (!ftrace_pages->next)
377 return NULL;
378 }
3c1720f0
SR
379 ftrace_pages = ftrace_pages->next;
380 }
381
382 return &ftrace_pages->records[ftrace_pages->index++];
383}
384
08f5ac90 385static struct dyn_ftrace *
d61f82d0 386ftrace_record_ip(unsigned long ip)
3d083395 387{
08f5ac90 388 struct dyn_ftrace *rec;
3d083395 389
f3c7ac40 390 if (ftrace_disabled)
08f5ac90 391 return NULL;
3d083395 392
08f5ac90
SR
393 rec = ftrace_alloc_dyn_node(ip);
394 if (!rec)
395 return NULL;
3d083395 396
08f5ac90 397 rec->ip = ip;
3d083395 398
08f5ac90 399 list_add(&rec->list, &ftrace_new_addrs);
3d083395 400
08f5ac90 401 return rec;
3d083395
SR
402}
403
b17e8a37
SR
404static void print_ip_ins(const char *fmt, unsigned char *p)
405{
406 int i;
407
408 printk(KERN_CONT "%s", fmt);
409
410 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
411 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
412}
413
31e88909 414static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
415{
416 switch (failed) {
417 case -EFAULT:
418 FTRACE_WARN_ON_ONCE(1);
419 pr_info("ftrace faulted on modifying ");
420 print_ip_sym(ip);
421 break;
422 case -EINVAL:
423 FTRACE_WARN_ON_ONCE(1);
424 pr_info("ftrace failed to modify ");
425 print_ip_sym(ip);
b17e8a37 426 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
427 printk(KERN_CONT "\n");
428 break;
429 case -EPERM:
430 FTRACE_WARN_ON_ONCE(1);
431 pr_info("ftrace faulted on writing ");
432 print_ip_sym(ip);
433 break;
434 default:
435 FTRACE_WARN_ON_ONCE(1);
436 pr_info("ftrace faulted on unknown error ");
437 print_ip_sym(ip);
438 }
439}
440
3c1720f0 441
0eb96701 442static int
31e88909 443__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 444{
41c52c0d 445 unsigned long ip, fl;
e7d3737e
FW
446 unsigned long ftrace_addr;
447
f0001207 448 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f
SR
449
450 ip = rec->ip;
451
982c350b
SR
452 /*
453 * If this record is not to be traced and
454 * it is not enabled then do nothing.
455 *
456 * If this record is not to be traced and
57794a9d 457 * it is enabled then disable it.
982c350b
SR
458 *
459 */
460 if (rec->flags & FTRACE_FL_NOTRACE) {
461 if (rec->flags & FTRACE_FL_ENABLED)
462 rec->flags &= ~FTRACE_FL_ENABLED;
463 else
464 return 0;
465
466 } else if (ftrace_filtered && enable) {
5072c59f 467 /*
982c350b 468 * Filtering is on:
5072c59f 469 */
a4500b84 470
982c350b 471 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
5072c59f 472
982c350b
SR
473 /* Record is filtered and enabled, do nothing */
474 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
0eb96701 475 return 0;
5072c59f 476
57794a9d 477 /* Record is not filtered or enabled, do nothing */
982c350b
SR
478 if (!fl)
479 return 0;
480
481 /* Record is not filtered but enabled, disable it */
482 if (fl == FTRACE_FL_ENABLED)
5072c59f 483 rec->flags &= ~FTRACE_FL_ENABLED;
982c350b
SR
484 else
485 /* Otherwise record is filtered but not enabled, enable it */
5072c59f 486 rec->flags |= FTRACE_FL_ENABLED;
5072c59f 487 } else {
982c350b 488 /* Disable or not filtered */
5072c59f 489
41c52c0d 490 if (enable) {
982c350b 491 /* if record is enabled, do nothing */
5072c59f 492 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 493 return 0;
982c350b 494
5072c59f 495 rec->flags |= FTRACE_FL_ENABLED;
982c350b 496
5072c59f 497 } else {
982c350b 498
57794a9d 499 /* if record is not enabled, do nothing */
5072c59f 500 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 501 return 0;
982c350b 502
5072c59f
SR
503 rec->flags &= ~FTRACE_FL_ENABLED;
504 }
505 }
506
982c350b 507 if (rec->flags & FTRACE_FL_ENABLED)
e7d3737e 508 return ftrace_make_call(rec, ftrace_addr);
31e88909 509 else
e7d3737e 510 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
511}
512
e309b41d 513static void ftrace_replace_code(int enable)
3c1720f0 514{
265c831c 515 int failed;
3c1720f0
SR
516 struct dyn_ftrace *rec;
517 struct ftrace_page *pg;
3c1720f0 518
265c831c
SR
519 do_for_each_ftrace_rec(pg, rec) {
520 /*
521 * Skip over free records and records that have
522 * failed.
523 */
524 if (rec->flags & FTRACE_FL_FREE ||
525 rec->flags & FTRACE_FL_FAILED)
526 continue;
527
528 /* ignore updates to this record's mcount site */
529 if (get_kprobe((void *)rec->ip)) {
530 freeze_record(rec);
531 continue;
532 } else {
533 unfreeze_record(rec);
534 }
f22f9a89 535
265c831c
SR
536 failed = __ftrace_replace_code(rec, enable);
537 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
538 rec->flags |= FTRACE_FL_FAILED;
539 if ((system_state == SYSTEM_BOOTING) ||
540 !core_kernel_text(rec->ip)) {
541 ftrace_free_rec(rec);
542 } else
543 ftrace_bug(failed, rec->ip);
3c1720f0 544 }
265c831c 545 } while_for_each_ftrace_rec();
3c1720f0
SR
546}
547
492a7ea5 548static int
31e88909 549ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
550{
551 unsigned long ip;
593eb8a2 552 int ret;
3c1720f0
SR
553
554 ip = rec->ip;
555
25aac9dc 556 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 557 if (ret) {
31e88909 558 ftrace_bug(ret, ip);
3c1720f0 559 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 560 return 0;
37ad5084 561 }
492a7ea5 562 return 1;
3c1720f0
SR
563}
564
e309b41d 565static int __ftrace_modify_code(void *data)
3d083395 566{
d61f82d0
SR
567 int *command = data;
568
a3583244 569 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 570 ftrace_replace_code(1);
a3583244 571 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
572 ftrace_replace_code(0);
573
574 if (*command & FTRACE_UPDATE_TRACE_FUNC)
575 ftrace_update_ftrace_func(ftrace_trace_function);
576
5a45cfe1
SR
577 if (*command & FTRACE_START_FUNC_RET)
578 ftrace_enable_ftrace_graph_caller();
579 else if (*command & FTRACE_STOP_FUNC_RET)
580 ftrace_disable_ftrace_graph_caller();
581
d61f82d0 582 return 0;
3d083395
SR
583}
584
e309b41d 585static void ftrace_run_update_code(int command)
3d083395 586{
784e2d76 587 stop_machine(__ftrace_modify_code, &command, NULL);
3d083395
SR
588}
589
d61f82d0 590static ftrace_func_t saved_ftrace_func;
60a7ecf4 591static int ftrace_start_up;
df4fc315
SR
592
593static void ftrace_startup_enable(int command)
594{
595 if (saved_ftrace_func != ftrace_trace_function) {
596 saved_ftrace_func = ftrace_trace_function;
597 command |= FTRACE_UPDATE_TRACE_FUNC;
598 }
599
600 if (!command || !ftrace_enabled)
601 return;
602
603 ftrace_run_update_code(command);
604}
d61f82d0 605
5a45cfe1 606static void ftrace_startup(int command)
3d083395 607{
4eebcc81
SR
608 if (unlikely(ftrace_disabled))
609 return;
610
60a7ecf4 611 ftrace_start_up++;
982c350b 612 command |= FTRACE_ENABLE_CALLS;
d61f82d0 613
df4fc315 614 ftrace_startup_enable(command);
3d083395
SR
615}
616
5a45cfe1 617static void ftrace_shutdown(int command)
3d083395 618{
4eebcc81
SR
619 if (unlikely(ftrace_disabled))
620 return;
621
60a7ecf4
SR
622 ftrace_start_up--;
623 if (!ftrace_start_up)
d61f82d0 624 command |= FTRACE_DISABLE_CALLS;
3d083395 625
d61f82d0
SR
626 if (saved_ftrace_func != ftrace_trace_function) {
627 saved_ftrace_func = ftrace_trace_function;
628 command |= FTRACE_UPDATE_TRACE_FUNC;
629 }
3d083395 630
d61f82d0 631 if (!command || !ftrace_enabled)
e6ea44e9 632 return;
d61f82d0
SR
633
634 ftrace_run_update_code(command);
3d083395
SR
635}
636
e309b41d 637static void ftrace_startup_sysctl(void)
b0fc494f 638{
d61f82d0
SR
639 int command = FTRACE_ENABLE_MCOUNT;
640
4eebcc81
SR
641 if (unlikely(ftrace_disabled))
642 return;
643
d61f82d0
SR
644 /* Force update next time */
645 saved_ftrace_func = NULL;
60a7ecf4
SR
646 /* ftrace_start_up is true if we want ftrace running */
647 if (ftrace_start_up)
d61f82d0
SR
648 command |= FTRACE_ENABLE_CALLS;
649
650 ftrace_run_update_code(command);
b0fc494f
SR
651}
652
e309b41d 653static void ftrace_shutdown_sysctl(void)
b0fc494f 654{
d61f82d0
SR
655 int command = FTRACE_DISABLE_MCOUNT;
656
4eebcc81
SR
657 if (unlikely(ftrace_disabled))
658 return;
659
60a7ecf4
SR
660 /* ftrace_start_up is true if ftrace is running */
661 if (ftrace_start_up)
d61f82d0
SR
662 command |= FTRACE_DISABLE_CALLS;
663
664 ftrace_run_update_code(command);
b0fc494f
SR
665}
666
3d083395
SR
667static cycle_t ftrace_update_time;
668static unsigned long ftrace_update_cnt;
669unsigned long ftrace_update_tot_cnt;
670
31e88909 671static int ftrace_update_code(struct module *mod)
3d083395 672{
08f5ac90 673 struct dyn_ftrace *p, *t;
f22f9a89 674 cycle_t start, stop;
3d083395 675
750ed1a4 676 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
677 ftrace_update_cnt = 0;
678
08f5ac90 679 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
3d083395 680
08f5ac90
SR
681 /* If something went wrong, bail without enabling anything */
682 if (unlikely(ftrace_disabled))
683 return -1;
f22f9a89 684
08f5ac90 685 list_del_init(&p->list);
f22f9a89 686
08f5ac90 687 /* convert record (i.e, patch mcount-call with NOP) */
31e88909 688 if (ftrace_code_disable(mod, p)) {
08f5ac90
SR
689 p->flags |= FTRACE_FL_CONVERTED;
690 ftrace_update_cnt++;
691 } else
692 ftrace_free_rec(p);
3d083395
SR
693 }
694
750ed1a4 695 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
696 ftrace_update_time = stop - start;
697 ftrace_update_tot_cnt += ftrace_update_cnt;
698
16444a8a
ACM
699 return 0;
700}
701
68bf21aa 702static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
703{
704 struct ftrace_page *pg;
705 int cnt;
706 int i;
3c1720f0
SR
707
708 /* allocate a few pages */
709 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
710 if (!ftrace_pages_start)
711 return -1;
712
713 /*
714 * Allocate a few more pages.
715 *
716 * TODO: have some parser search vmlinux before
717 * final linking to find all calls to ftrace.
718 * Then we can:
719 * a) know how many pages to allocate.
720 * and/or
721 * b) set up the table then.
722 *
723 * The dynamic code is still necessary for
724 * modules.
725 */
726
727 pg = ftrace_pages = ftrace_pages_start;
728
68bf21aa 729 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 730 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 731 num_to_init, cnt + 1);
3c1720f0
SR
732
733 for (i = 0; i < cnt; i++) {
734 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
735
736 /* If we fail, we'll try later anyway */
737 if (!pg->next)
738 break;
739
740 pg = pg->next;
741 }
742
743 return 0;
744}
745
5072c59f
SR
746enum {
747 FTRACE_ITER_FILTER = (1 << 0),
748 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 749 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 750 FTRACE_ITER_FAILURES = (1 << 3),
0c75a3ed 751 FTRACE_ITER_PRINTALL = (1 << 4),
5072c59f
SR
752};
753
754#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
755
756struct ftrace_iterator {
5072c59f 757 struct ftrace_page *pg;
431aa3fb 758 int idx;
5072c59f
SR
759 unsigned flags;
760 unsigned char buffer[FTRACE_BUFF_MAX+1];
761 unsigned buffer_idx;
762 unsigned filtered;
763};
764
e309b41d 765static void *
5072c59f
SR
766t_next(struct seq_file *m, void *v, loff_t *pos)
767{
768 struct ftrace_iterator *iter = m->private;
769 struct dyn_ftrace *rec = NULL;
770
771 (*pos)++;
772
0c75a3ed
SR
773 if (iter->flags & FTRACE_ITER_PRINTALL)
774 return NULL;
775
52baf119 776 mutex_lock(&ftrace_lock);
5072c59f
SR
777 retry:
778 if (iter->idx >= iter->pg->index) {
779 if (iter->pg->next) {
780 iter->pg = iter->pg->next;
781 iter->idx = 0;
782 goto retry;
50cdaf08
LW
783 } else {
784 iter->idx = -1;
5072c59f
SR
785 }
786 } else {
787 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
788 if ((rec->flags & FTRACE_FL_FREE) ||
789
790 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
791 (rec->flags & FTRACE_FL_FAILED)) ||
792
793 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 794 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 795
0183fb1c
SR
796 ((iter->flags & FTRACE_ITER_FILTER) &&
797 !(rec->flags & FTRACE_FL_FILTER)) ||
798
41c52c0d
SR
799 ((iter->flags & FTRACE_ITER_NOTRACE) &&
800 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
801 rec = NULL;
802 goto retry;
803 }
804 }
52baf119 805 mutex_unlock(&ftrace_lock);
5072c59f 806
5072c59f
SR
807 return rec;
808}
809
810static void *t_start(struct seq_file *m, loff_t *pos)
811{
812 struct ftrace_iterator *iter = m->private;
813 void *p = NULL;
5072c59f 814
0c75a3ed
SR
815 /*
816 * For set_ftrace_filter reading, if we have the filter
817 * off, we can short cut and just print out that all
818 * functions are enabled.
819 */
820 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
821 if (*pos > 0)
822 return NULL;
823 iter->flags |= FTRACE_ITER_PRINTALL;
824 (*pos)++;
825 return iter;
826 }
827
50cdaf08
LW
828 if (*pos > 0) {
829 if (iter->idx < 0)
830 return p;
831 (*pos)--;
832 iter->idx--;
833 }
5821e1b7 834
50cdaf08 835 p = t_next(m, p, pos);
5072c59f
SR
836
837 return p;
838}
839
/* seq_file .stop: nothing to release — lock is per-t_next(). */
static void t_stop(struct seq_file *m, void *p)
{
}
843
844static int t_show(struct seq_file *m, void *v)
845{
0c75a3ed 846 struct ftrace_iterator *iter = m->private;
5072c59f
SR
847 struct dyn_ftrace *rec = v;
848 char str[KSYM_SYMBOL_LEN];
849
0c75a3ed
SR
850 if (iter->flags & FTRACE_ITER_PRINTALL) {
851 seq_printf(m, "#### all functions enabled ####\n");
852 return 0;
853 }
854
5072c59f
SR
855 if (!rec)
856 return 0;
857
858 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
859
50cdaf08 860 seq_printf(m, "%s\n", str);
5072c59f
SR
861
862 return 0;
863}
864
865static struct seq_operations show_ftrace_seq_ops = {
866 .start = t_start,
867 .next = t_next,
868 .stop = t_stop,
869 .show = t_show,
870};
871
e309b41d 872static int
5072c59f
SR
873ftrace_avail_open(struct inode *inode, struct file *file)
874{
875 struct ftrace_iterator *iter;
876 int ret;
877
4eebcc81
SR
878 if (unlikely(ftrace_disabled))
879 return -ENODEV;
880
5072c59f
SR
881 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
882 if (!iter)
883 return -ENOMEM;
884
885 iter->pg = ftrace_pages_start;
5072c59f
SR
886
887 ret = seq_open(file, &show_ftrace_seq_ops);
888 if (!ret) {
889 struct seq_file *m = file->private_data;
4bf39a94 890
5072c59f 891 m->private = iter;
4bf39a94 892 } else {
5072c59f 893 kfree(iter);
4bf39a94 894 }
5072c59f
SR
895
896 return ret;
897}
898
899int ftrace_avail_release(struct inode *inode, struct file *file)
900{
901 struct seq_file *m = (struct seq_file *)file->private_data;
902 struct ftrace_iterator *iter = m->private;
903
904 seq_release(inode, file);
905 kfree(iter);
4bf39a94 906
5072c59f
SR
907 return 0;
908}
909
eb9a7bf0
AS
910static int
911ftrace_failures_open(struct inode *inode, struct file *file)
912{
913 int ret;
914 struct seq_file *m;
915 struct ftrace_iterator *iter;
916
917 ret = ftrace_avail_open(inode, file);
918 if (!ret) {
919 m = (struct seq_file *)file->private_data;
920 iter = (struct ftrace_iterator *)m->private;
921 iter->flags = FTRACE_ITER_FAILURES;
922 }
923
924 return ret;
925}
926
927
41c52c0d 928static void ftrace_filter_reset(int enable)
5072c59f
SR
929{
930 struct ftrace_page *pg;
931 struct dyn_ftrace *rec;
41c52c0d 932 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f 933
52baf119 934 mutex_lock(&ftrace_lock);
41c52c0d
SR
935 if (enable)
936 ftrace_filtered = 0;
265c831c
SR
937 do_for_each_ftrace_rec(pg, rec) {
938 if (rec->flags & FTRACE_FL_FAILED)
939 continue;
940 rec->flags &= ~type;
941 } while_for_each_ftrace_rec();
52baf119 942 mutex_unlock(&ftrace_lock);
5072c59f
SR
943}
944
e309b41d 945static int
41c52c0d 946ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
947{
948 struct ftrace_iterator *iter;
949 int ret = 0;
950
4eebcc81
SR
951 if (unlikely(ftrace_disabled))
952 return -ENODEV;
953
5072c59f
SR
954 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
955 if (!iter)
956 return -ENOMEM;
957
41c52c0d 958 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
959 if ((file->f_mode & FMODE_WRITE) &&
960 !(file->f_flags & O_APPEND))
41c52c0d 961 ftrace_filter_reset(enable);
5072c59f
SR
962
963 if (file->f_mode & FMODE_READ) {
964 iter->pg = ftrace_pages_start;
41c52c0d
SR
965 iter->flags = enable ? FTRACE_ITER_FILTER :
966 FTRACE_ITER_NOTRACE;
5072c59f
SR
967
968 ret = seq_open(file, &show_ftrace_seq_ops);
969 if (!ret) {
970 struct seq_file *m = file->private_data;
971 m->private = iter;
972 } else
973 kfree(iter);
974 } else
975 file->private_data = iter;
41c52c0d 976 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
977
978 return ret;
979}
980
41c52c0d
SR
/* set_ftrace_filter open: regex open in filter (enable) mode. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
986
/* set_ftrace_notrace open: regex open in notrace mode. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
992
e309b41d 993static ssize_t
41c52c0d 994ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
995 size_t cnt, loff_t *ppos)
996{
997 if (file->f_mode & FMODE_READ)
998 return seq_read(file, ubuf, cnt, ppos);
999 else
1000 return -EPERM;
1001}
1002
e309b41d 1003static loff_t
41c52c0d 1004ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1005{
1006 loff_t ret;
1007
1008 if (file->f_mode & FMODE_READ)
1009 ret = seq_lseek(file, offset, origin);
1010 else
1011 file->f_pos = ret = 1;
1012
1013 return ret;
1014}
1015
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' negates the match and is stripped */
	*not = (buff[0] == '!');
	if (*not) {
		buff++;
		len--;
	}

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] != '*')
			continue;
		if (i == 0) {
			/* leading '*' — match on the suffix */
			type = MATCH_END_ONLY;
			*search = buff + 1;
		} else {
			/* a second '*' upgrades suffix to substring match */
			type = (type == MATCH_END_ONLY) ?
				MATCH_MIDDLE_ONLY : MATCH_FRONT_ONLY;
			/* terminate the pattern at the trailing '*' */
			buff[i] = 0;
			break;
		}
	}

	return type;
}
1069
64e7c440 1070static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 1071{
9f4801e3
SR
1072 int matched = 0;
1073 char *ptr;
1074
9f4801e3
SR
1075 switch (type) {
1076 case MATCH_FULL:
1077 if (strcmp(str, regex) == 0)
1078 matched = 1;
1079 break;
1080 case MATCH_FRONT_ONLY:
1081 if (strncmp(str, regex, len) == 0)
1082 matched = 1;
1083 break;
1084 case MATCH_MIDDLE_ONLY:
1085 if (strstr(str, regex))
1086 matched = 1;
1087 break;
1088 case MATCH_END_ONLY:
1089 ptr = strstr(str, regex);
1090 if (ptr && (ptr[len] == 0))
1091 matched = 1;
1092 break;
1093 }
1094
1095 return matched;
1096}
1097
64e7c440
SR
1098static int
1099ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1100{
1101 char str[KSYM_SYMBOL_LEN];
1102
1103 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1104 return ftrace_match(str, regex, len, type);
1105}
1106
9f4801e3
SR
1107static void ftrace_match_records(char *buff, int len, int enable)
1108{
1109 char *search;
1110 struct ftrace_page *pg;
1111 struct dyn_ftrace *rec;
1112 int type;
1113 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1114 unsigned search_len;
1115 int not;
1116
1117 type = ftrace_setup_glob(buff, len, &search, &not);
1118
1119 search_len = strlen(search);
1120
52baf119 1121 mutex_lock(&ftrace_lock);
265c831c 1122 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1123
1124 if (rec->flags & FTRACE_FL_FAILED)
1125 continue;
9f4801e3
SR
1126
1127 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1128 if (not)
1129 rec->flags &= ~flag;
1130 else
1131 rec->flags |= flag;
1132 }
e68746a2
SR
1133 /*
1134 * Only enable filtering if we have a function that
1135 * is filtered on.
1136 */
1137 if (enable && (rec->flags & FTRACE_FL_FILTER))
1138 ftrace_filtered = 1;
265c831c 1139 } while_for_each_ftrace_rec();
52baf119 1140 mutex_unlock(&ftrace_lock);
5072c59f
SR
1141}
1142
64e7c440
SR
1143static int
1144ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1145 char *regex, int len, int type)
1146{
1147 char str[KSYM_SYMBOL_LEN];
1148 char *modname;
1149
1150 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1151
1152 if (!modname || strcmp(modname, mod))
1153 return 0;
1154
1155 /* blank search means to match all funcs in the mod */
1156 if (len)
1157 return ftrace_match(str, regex, len, type);
1158 else
1159 return 1;
1160}
1161
1162static void ftrace_match_module_records(char *buff, char *mod, int enable)
1163{
1164 char *search = buff;
1165 struct ftrace_page *pg;
1166 struct dyn_ftrace *rec;
1167 int type = MATCH_FULL;
1168 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1169 unsigned search_len = 0;
1170 int not = 0;
1171
1172 /* blank or '*' mean the same */
1173 if (strcmp(buff, "*") == 0)
1174 buff[0] = 0;
1175
1176 /* handle the case of 'dont filter this module' */
1177 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1178 buff[0] = 0;
1179 not = 1;
1180 }
1181
1182 if (strlen(buff)) {
1183 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1184 search_len = strlen(search);
1185 }
1186
52baf119 1187 mutex_lock(&ftrace_lock);
64e7c440
SR
1188 do_for_each_ftrace_rec(pg, rec) {
1189
1190 if (rec->flags & FTRACE_FL_FAILED)
1191 continue;
1192
1193 if (ftrace_match_module_record(rec, mod,
1194 search, search_len, type)) {
1195 if (not)
1196 rec->flags &= ~flag;
1197 else
1198 rec->flags |= flag;
1199 }
e68746a2
SR
1200 if (enable && (rec->flags & FTRACE_FL_FILTER))
1201 ftrace_filtered = 1;
64e7c440
SR
1202
1203 } while_for_each_ftrace_rec();
52baf119 1204 mutex_unlock(&ftrace_lock);
64e7c440
SR
1205}
1206
f6180773
SR
1207/*
1208 * We register the module command as a template to show others how
1209 * to register the a command as well.
1210 */
1211
1212static int
1213ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1214{
1215 char *mod;
1216
1217 /*
1218 * cmd == 'mod' because we only registered this func
1219 * for the 'mod' ftrace_func_command.
1220 * But if you register one func with multiple commands,
1221 * you can tell which command was used by the cmd
1222 * parameter.
1223 */
1224
1225 /* we must have a module name */
1226 if (!param)
1227 return -EINVAL;
1228
1229 mod = strsep(&param, ":");
1230 if (!strlen(mod))
1231 return -EINVAL;
1232
1233 ftrace_match_module_records(func, mod, enable);
1234 return 0;
1235}
1236
/* The ":mod:<module>" filter command, wired to the callback above. */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

/* Register the "mod" command during boot. */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
1247
/* Registered filter commands; list is protected by ftrace_cmd_mutex. */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
1250
1251int register_ftrace_command(struct ftrace_func_command *cmd)
1252{
1253 struct ftrace_func_command *p;
1254 int ret = 0;
1255
1256 mutex_lock(&ftrace_cmd_mutex);
1257 list_for_each_entry(p, &ftrace_commands, list) {
1258 if (strcmp(cmd->name, p->name) == 0) {
1259 ret = -EBUSY;
1260 goto out_unlock;
1261 }
1262 }
1263 list_add(&cmd->list, &ftrace_commands);
1264 out_unlock:
1265 mutex_unlock(&ftrace_cmd_mutex);
1266
1267 return ret;
1268}
1269
1270int unregister_ftrace_command(struct ftrace_func_command *cmd)
1271{
1272 struct ftrace_func_command *p, *n;
1273 int ret = -ENODEV;
1274
1275 mutex_lock(&ftrace_cmd_mutex);
1276 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1277 if (strcmp(cmd->name, p->name) == 0) {
1278 ret = 0;
1279 list_del_init(&p->list);
1280 goto out_unlock;
1281 }
1282 }
1283 out_unlock:
1284 mutex_unlock(&ftrace_cmd_mutex);
1285
1286 return ret;
1287}
1288
64e7c440
SR
1289static int ftrace_process_regex(char *buff, int len, int enable)
1290{
f6180773
SR
1291 struct ftrace_func_command *p;
1292 char *func, *command, *next = buff;
1293 int ret = -EINVAL;
64e7c440
SR
1294
1295 func = strsep(&next, ":");
1296
1297 if (!next) {
1298 ftrace_match_records(func, len, enable);
1299 return 0;
1300 }
1301
f6180773 1302 /* command found */
64e7c440
SR
1303
1304 command = strsep(&next, ":");
1305
f6180773
SR
1306 mutex_lock(&ftrace_cmd_mutex);
1307 list_for_each_entry(p, &ftrace_commands, list) {
1308 if (strcmp(p->name, command) == 0) {
1309 ret = p->func(func, command, next, enable);
1310 goto out_unlock;
1311 }
64e7c440 1312 }
f6180773
SR
1313 out_unlock:
1314 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 1315
f6180773 1316 return ret;
64e7c440
SR
1317}
1318
e309b41d 1319static ssize_t
41c52c0d
SR
1320ftrace_regex_write(struct file *file, const char __user *ubuf,
1321 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1322{
1323 struct ftrace_iterator *iter;
1324 char ch;
1325 size_t read = 0;
1326 ssize_t ret;
1327
1328 if (!cnt || cnt < 0)
1329 return 0;
1330
41c52c0d 1331 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1332
1333 if (file->f_mode & FMODE_READ) {
1334 struct seq_file *m = file->private_data;
1335 iter = m->private;
1336 } else
1337 iter = file->private_data;
1338
1339 if (!*ppos) {
1340 iter->flags &= ~FTRACE_ITER_CONT;
1341 iter->buffer_idx = 0;
1342 }
1343
1344 ret = get_user(ch, ubuf++);
1345 if (ret)
1346 goto out;
1347 read++;
1348 cnt--;
1349
1350 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1351 /* skip white space */
1352 while (cnt && isspace(ch)) {
1353 ret = get_user(ch, ubuf++);
1354 if (ret)
1355 goto out;
1356 read++;
1357 cnt--;
1358 }
1359
5072c59f
SR
1360 if (isspace(ch)) {
1361 file->f_pos += read;
1362 ret = read;
1363 goto out;
1364 }
1365
1366 iter->buffer_idx = 0;
1367 }
1368
1369 while (cnt && !isspace(ch)) {
1370 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1371 iter->buffer[iter->buffer_idx++] = ch;
1372 else {
1373 ret = -EINVAL;
1374 goto out;
1375 }
1376 ret = get_user(ch, ubuf++);
1377 if (ret)
1378 goto out;
1379 read++;
1380 cnt--;
1381 }
1382
1383 if (isspace(ch)) {
1384 iter->filtered++;
1385 iter->buffer[iter->buffer_idx] = 0;
64e7c440
SR
1386 ret = ftrace_process_regex(iter->buffer,
1387 iter->buffer_idx, enable);
1388 if (ret)
1389 goto out;
5072c59f
SR
1390 iter->buffer_idx = 0;
1391 } else
1392 iter->flags |= FTRACE_ITER_CONT;
1393
1394
1395 file->f_pos += read;
1396
1397 ret = read;
1398 out:
41c52c0d 1399 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1400
1401 return ret;
1402}
1403
41c52c0d
SR
/* write for set_ftrace_filter: add to the "trace these" set. */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

/* write for set_ftrace_notrace: add to the "never trace these" set. */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
1417
/*
 * Kernel-internal entry: apply @buf as a filter (enable=1) or notrace
 * (enable=0) pattern, optionally resetting the existing set first.
 * No-op once ftrace has been disabled by an anomaly.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
1431
77a2b37d
SR
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
4eebcc81 1445
41c52c0d
SR
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1460
e309b41d 1461static int
41c52c0d 1462ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1463{
1464 struct seq_file *m = (struct seq_file *)file->private_data;
1465 struct ftrace_iterator *iter;
1466
41c52c0d 1467 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1468 if (file->f_mode & FMODE_READ) {
1469 iter = m->private;
1470
1471 seq_release(inode, file);
1472 } else
1473 iter = file->private_data;
1474
1475 if (iter->buffer_idx) {
1476 iter->filtered++;
1477 iter->buffer[iter->buffer_idx] = 0;
7f24b31b 1478 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1479 }
1480
e6ea44e9 1481 mutex_lock(&ftrace_lock);
ee02a2e5 1482 if (ftrace_start_up && ftrace_enabled)
5072c59f 1483 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
e6ea44e9 1484 mutex_unlock(&ftrace_lock);
5072c59f
SR
1485
1486 kfree(iter);
41c52c0d 1487 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1488 return 0;
1489}
1490
41c52c0d
SR
/* release for set_ftrace_filter: commit buffered filter input. */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

/* release for set_ftrace_notrace: commit buffered notrace input. */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1502
5072c59f
SR
1503static struct file_operations ftrace_avail_fops = {
1504 .open = ftrace_avail_open,
1505 .read = seq_read,
1506 .llseek = seq_lseek,
1507 .release = ftrace_avail_release,
1508};
1509
eb9a7bf0
AS
1510static struct file_operations ftrace_failures_fops = {
1511 .open = ftrace_failures_open,
1512 .read = seq_read,
1513 .llseek = seq_lseek,
1514 .release = ftrace_avail_release,
1515};
1516
5072c59f
SR
1517static struct file_operations ftrace_filter_fops = {
1518 .open = ftrace_filter_open,
41c52c0d 1519 .read = ftrace_regex_read,
5072c59f 1520 .write = ftrace_filter_write,
41c52c0d 1521 .llseek = ftrace_regex_lseek,
5072c59f
SR
1522 .release = ftrace_filter_release,
1523};
1524
41c52c0d
SR
1525static struct file_operations ftrace_notrace_fops = {
1526 .open = ftrace_notrace_open,
1527 .read = ftrace_regex_read,
1528 .write = ftrace_notrace_write,
1529 .llseek = ftrace_regex_lseek,
1530 .release = ftrace_notrace_release,
1531};
1532
ea4e2bc4
SR
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Protects ftrace_graph_count and ftrace_graph_funcs. */
static DEFINE_MUTEX(graph_lock);

/* Functions selected through set_graph_function. */
int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1539
1540static void *
1541g_next(struct seq_file *m, void *v, loff_t *pos)
1542{
1543 unsigned long *array = m->private;
1544 int index = *pos;
1545
1546 (*pos)++;
1547
1548 if (index >= ftrace_graph_count)
1549 return NULL;
1550
1551 return &array[index];
1552}
1553
/* seq_file start: take graph_lock, then return the current element. */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

/* seq_file stop: drop the lock taken in g_start(). */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
1569
1570static int g_show(struct seq_file *m, void *v)
1571{
1572 unsigned long *ptr = v;
1573 char str[KSYM_SYMBOL_LEN];
1574
1575 if (!ptr)
1576 return 0;
1577
1578 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1579
1580 seq_printf(m, "%s\n", str);
1581
1582 return 0;
1583}
1584
/* seq_file iterator over the set_graph_function list. */
static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
1591
/*
 * Open set_graph_function.  A truncating write clears the current
 * list; readers get a seq_file over the ftrace_graph_funcs array.
 */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		/* Write without O_APPEND: forget all selected functions. */
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		/* Write-only open: stash the array directly. */
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}
1619
1620static ssize_t
1621ftrace_graph_read(struct file *file, char __user *ubuf,
1622 size_t cnt, loff_t *ppos)
1623{
1624 if (file->f_mode & FMODE_READ)
1625 return seq_read(file, ubuf, cnt, ppos);
1626 else
1627 return -EPERM;
1628}
1629
1630static int
1631ftrace_set_func(unsigned long *array, int idx, char *buffer)
1632{
1633 char str[KSYM_SYMBOL_LEN];
1634 struct dyn_ftrace *rec;
1635 struct ftrace_page *pg;
1636 int found = 0;
265c831c 1637 int j;
ea4e2bc4
SR
1638
1639 if (ftrace_disabled)
1640 return -ENODEV;
1641
52baf119 1642 mutex_lock(&ftrace_lock);
265c831c
SR
1643 do_for_each_ftrace_rec(pg, rec) {
1644
1645 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1646 continue;
1647
1648 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1649 if (strcmp(str, buffer) == 0) {
1650 /* Return 1 if we add it to the array */
1651 found = 1;
1652 for (j = 0; j < idx; j++)
1653 if (array[j] == rec->ip) {
1654 found = 0;
1655 break;
1656 }
1657 if (found)
1658 array[idx] = rec->ip;
1659 goto out;
ea4e2bc4 1660 }
265c831c
SR
1661 } while_for_each_ftrace_rec();
1662 out:
52baf119 1663 mutex_unlock(&ftrace_lock);
ea4e2bc4
SR
1664
1665 return found ? 0 : -EINVAL;
1666}
1667
1668static ssize_t
1669ftrace_graph_write(struct file *file, const char __user *ubuf,
1670 size_t cnt, loff_t *ppos)
1671{
1672 unsigned char buffer[FTRACE_BUFF_MAX+1];
1673 unsigned long *array;
1674 size_t read = 0;
1675 ssize_t ret;
1676 int index = 0;
1677 char ch;
1678
1679 if (!cnt || cnt < 0)
1680 return 0;
1681
1682 mutex_lock(&graph_lock);
1683
1684 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1685 ret = -EBUSY;
1686 goto out;
1687 }
1688
1689 if (file->f_mode & FMODE_READ) {
1690 struct seq_file *m = file->private_data;
1691 array = m->private;
1692 } else
1693 array = file->private_data;
1694
1695 ret = get_user(ch, ubuf++);
1696 if (ret)
1697 goto out;
1698 read++;
1699 cnt--;
1700
1701 /* skip white space */
1702 while (cnt && isspace(ch)) {
1703 ret = get_user(ch, ubuf++);
1704 if (ret)
1705 goto out;
1706 read++;
1707 cnt--;
1708 }
1709
1710 if (isspace(ch)) {
1711 *ppos += read;
1712 ret = read;
1713 goto out;
1714 }
1715
1716 while (cnt && !isspace(ch)) {
1717 if (index < FTRACE_BUFF_MAX)
1718 buffer[index++] = ch;
1719 else {
1720 ret = -EINVAL;
1721 goto out;
1722 }
1723 ret = get_user(ch, ubuf++);
1724 if (ret)
1725 goto out;
1726 read++;
1727 cnt--;
1728 }
1729 buffer[index] = 0;
1730
1731 /* we allow only one at a time */
1732 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1733 if (ret)
1734 goto out;
1735
1736 ftrace_graph_count++;
1737
1738 file->f_pos += read;
1739
1740 ret = read;
1741 out:
1742 mutex_unlock(&graph_lock);
1743
1744 return ret;
1745}
1746
/* File operations for set_graph_function (no llseek/release needed). */
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
1752#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1753
/*
 * Create the dynamic-ftrace control files in the tracing debugfs dir.
 * Failures to create an entry are reported but not fatal.
 */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
1792
31e88909
SR
/*
 * Record every mcount call site listed in [start, end) and then patch
 * them (for @mod, or the core kernel when mod is NULL).
 */
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}
1824
31e88909
SR
/*
 * Convert a just-loaded module's mcount call sites.  No-op when ftrace
 * is disabled or the module has no call sites (start == end).
 */
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
1832
68bf21aa
SR
/* Bounds of the linker-built table of mcount call sites. */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

/*
 * Boot-time initialization of dynamic ftrace: let the architecture
 * initialize, size the record table, and convert all core call sites.
 * Any failure permanently disables ftrace.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
68bf21aa 1868
3d083395 1869#else
0b6e4d56
FW
1870
/* Without dynamic ftrace there is nothing to patch; just enable. */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);
1877
df4fc315
SR
/* Stubs used when CONFIG_DYNAMIC_FTRACE is not set. */
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
3d083395
SR
1885#endif /* CONFIG_DYNAMIC_FTRACE */
1886
df4fc315
SR
/*
 * Report the current pid filter: "swapper tasks", the traced pid
 * number, or "no pid" when filtering is off.
 */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
1903
e32d8956 1904static void clear_ftrace_swapper(void)
978f3a45
SR
1905{
1906 struct task_struct *p;
e32d8956 1907 int cpu;
978f3a45 1908
e32d8956
SR
1909 get_online_cpus();
1910 for_each_online_cpu(cpu) {
1911 p = idle_task(cpu);
978f3a45 1912 clear_tsk_trace_trace(p);
e32d8956
SR
1913 }
1914 put_online_cpus();
1915}
978f3a45 1916
e32d8956
SR
1917static void set_ftrace_swapper(void)
1918{
1919 struct task_struct *p;
1920 int cpu;
1921
1922 get_online_cpus();
1923 for_each_online_cpu(cpu) {
1924 p = idle_task(cpu);
1925 set_tsk_trace_trace(p);
1926 }
1927 put_online_cpus();
978f3a45
SR
1928}
1929
e32d8956
SR
1930static void clear_ftrace_pid(struct pid *pid)
1931{
1932 struct task_struct *p;
1933
229c4ef8 1934 rcu_read_lock();
e32d8956
SR
1935 do_each_pid_task(pid, PIDTYPE_PID, p) {
1936 clear_tsk_trace_trace(p);
1937 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
1938 rcu_read_unlock();
1939
e32d8956
SR
1940 put_pid(pid);
1941}
1942
1943static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
1944{
1945 struct task_struct *p;
1946
229c4ef8 1947 rcu_read_lock();
978f3a45
SR
1948 do_each_pid_task(pid, PIDTYPE_PID, p) {
1949 set_tsk_trace_trace(p);
1950 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 1951 rcu_read_unlock();
978f3a45
SR
1952}
1953
e32d8956
SR
/*
 * Disable pid filtering: clear the task flags (and, for a real pid,
 * drop its reference via clear_ftrace_pid), then NULL the pointer.
 */
static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

/* Mark @pid's tasks (or all idle tasks for swapper) for tracing. */
static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
1971
df4fc315
SR
/*
 * Write handler for set_ftrace_pid:
 *   val < 0  -> disable pid filtering
 *   val == 0 -> trace the per-cpu idle (swapper) tasks
 *   val > 0  -> trace only that pid
 */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				/* Already traced: drop the extra ref. */
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		/* find_get_pid() found nothing: leave filtering off. */
		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
2036
2037static struct file_operations ftrace_pid_fops = {
2038 .read = ftrace_pid_read,
2039 .write = ftrace_pid_write,
2040};
2041
/* Create ftrace's debugfs entries under the tracing directory. */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);
2062
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: the disable flags are set and the
 * trace callback is cleared immediately, with no synchronization
 * or orderly shutdown of registered users.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
2076
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	/* Start tracing; no extra update commands needed here. */
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
2103
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	/* Stop tracing if this was the last registered ops. */
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
2121
/*
 * sysctl handler for the ftrace_enabled knob: flips the live trace
 * callback between the registered function(s) and ftrace_stub.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* Nothing to do on read, error, or an unchanged value. */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* Single entry calls it directly, else fan out. */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
f17845e5 2164
fb52607a 2165#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 2166
/* Non-zero while a graph tracer is registered. */
static atomic_t ftrace_graph_active;
/* PM notifier used to pause graph tracing around hibernation. */
static struct notifier_block ftrace_suspend_notifier;
e7d3737e 2169
e49dc19c
SR
/* Default entry hook: does nothing and returns 0. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
2179
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* Pre-allocate the batch outside the tasklist lock. */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* Batch exhausted: caller retries for remaining tasks. */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* Free any stacks that were not handed out to a task. */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
2225
2226/* Allocate a return stack for each task */
fb52607a 2227static int start_graph_tracing(void)
f201ae23
FW
2228{
2229 struct ftrace_ret_stack **ret_stack_list;
2230 int ret;
2231
2232 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2233 sizeof(struct ftrace_ret_stack *),
2234 GFP_KERNEL);
2235
2236 if (!ret_stack_list)
2237 return -ENOMEM;
2238
2239 do {
2240 ret = alloc_retstack_tasklist(ret_stack_list);
2241 } while (ret == -EAGAIN);
2242
2243 kfree(ret_stack_list);
2244 return ret;
2245}
2246
4a2b8dda
FW
2247/*
2248 * Hibernation protection.
2249 * The state of the current task is too much unstable during
2250 * suspend/restore to disk. We want to protect against that.
2251 */
2252static int
2253ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2254 void *unused)
2255{
2256 switch (state) {
2257 case PM_HIBERNATION_PREPARE:
2258 pause_graph_tracing();
2259 break;
2260
2261 case PM_POST_HIBERNATION:
2262 unpause_graph_tracing();
2263 break;
2264 }
2265 return NOTIFY_DONE;
2266}
2267
287b6e68
FW
2268int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2269 trace_func_graph_ent_t entryfunc)
15e6cb36 2270{
e7d3737e
FW
2271 int ret = 0;
2272
e6ea44e9 2273 mutex_lock(&ftrace_lock);
e7d3737e 2274
4a2b8dda
FW
2275 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2276 register_pm_notifier(&ftrace_suspend_notifier);
2277
287b6e68 2278 atomic_inc(&ftrace_graph_active);
fb52607a 2279 ret = start_graph_tracing();
f201ae23 2280 if (ret) {
287b6e68 2281 atomic_dec(&ftrace_graph_active);
f201ae23
FW
2282 goto out;
2283 }
e53a6319 2284
287b6e68
FW
2285 ftrace_graph_return = retfunc;
2286 ftrace_graph_entry = entryfunc;
e53a6319 2287
5a45cfe1 2288 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
2289
2290out:
e6ea44e9 2291 mutex_unlock(&ftrace_lock);
e7d3737e 2292 return ret;
15e6cb36
FW
2293}
2294
/* Detach the graph hooks, restoring the stub callbacks. */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}
f201ae23
FW
2307
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		/* On allocation failure the task is simply left untraced. */
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}
2323
/* Free a task's return stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
14a866c5
SR
2334
/* Stop all function tracing; thin wrapper around ftrace_stop(). */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
15e6cb36
FW
2339#endif
2340