/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)            \
    do {                                \
        if (WARN_ON(cond))              \
            ftrace_kill();              \
    } while (0)

#define FTRACE_WARN_ON_ONCE(cond)       \
    do {                                \
        if (WARN_ON_ONCE(cond))         \
            ftrace_kill();              \
    } while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
    .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
    struct ftrace_ops *op = ftrace_list;

    /* in case someone actually ports this to alpha! */
    read_barrier_depends();

    while (op != &ftrace_list_end) {
        /* silly alpha */
        read_barrier_depends();
        op->func(ip, parent_ip);
        op = op->next;
    }
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
    if (!test_tsk_trace_trace(current))
        return;

    ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
    /* do not set ftrace_pid_function to itself! */
    if (func != ftrace_pid_func)
        ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be some lag before tracing fully stops.
 */
void clear_ftrace_function(void)
{
    ftrace_trace_function = ftrace_stub;
    __ftrace_trace_function = ftrace_stub;
    ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
    if (function_trace_stop)
        return;

    __ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
    ops->next = ftrace_list;
    /*
     * We are entering ops into the ftrace_list but another
     * CPU might be walking that list. We need to make sure
     * the ops->next pointer is valid before another CPU sees
     * the ops pointer included into the ftrace_list.
     */
    smp_wmb();
    ftrace_list = ops;

    if (ftrace_enabled) {
        ftrace_func_t func;

        if (ops->next == &ftrace_list_end)
            func = ops->func;
        else
            func = ftrace_list_func;

        if (ftrace_pid_trace) {
            set_ftrace_pid_function(func);
            func = ftrace_pid_func;
        }

        /*
         * For one func, simply call it directly.
         * For more than one func, call the chain.
         */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
        __ftrace_trace_function = func;
        ftrace_trace_function = ftrace_test_stop_func;
#endif
    }

    return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
    struct ftrace_ops **p;

    /*
     * If we are removing the last function, then simply point
     * to the ftrace_stub.
     */
    if (ftrace_list == ops && ops->next == &ftrace_list_end) {
        ftrace_trace_function = ftrace_stub;
        ftrace_list = &ftrace_list_end;
        return 0;
    }

    for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
        if (*p == ops)
            break;

    if (*p != ops)
        return -1;

    *p = (*p)->next;

    if (ftrace_enabled) {
        /* If we only have one func left, then call that directly */
        if (ftrace_list->next == &ftrace_list_end) {
            ftrace_func_t func = ftrace_list->func;

            if (ftrace_pid_trace) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
            }
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
            ftrace_trace_function = func;
#else
            __ftrace_trace_function = func;
#endif
        }
    }

    return 0;
}

static void ftrace_update_pid_func(void)
{
    ftrace_func_t func;

    mutex_lock(&ftrace_lock);

    if (ftrace_trace_function == ftrace_stub)
        goto out;

    func = ftrace_trace_function;

    if (ftrace_pid_trace) {
        set_ftrace_pid_function(func);
        func = ftrace_pid_func;
    } else {
        if (func == ftrace_pid_func)
            func = ftrace_pid_function;
    }

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
    ftrace_trace_function = func;
#else
    __ftrace_trace_function = func;
#endif

 out:
    mutex_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_hook {
    struct hlist_node node;
    struct ftrace_hook_ops *ops;
    unsigned long flags;
    unsigned long ip;
    void *data;
    struct rcu_head rcu;
};

enum {
    FTRACE_ENABLE_CALLS = (1 << 0),
    FTRACE_DISABLE_CALLS = (1 << 1),
    FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
    FTRACE_ENABLE_MCOUNT = (1 << 3),
    FTRACE_DISABLE_MCOUNT = (1 << 4),
    FTRACE_START_FUNC_RET = (1 << 5),
    FTRACE_STOP_FUNC_RET = (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
    struct ftrace_page *next;
    int index;
    struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
    ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

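/*
 * Back-of-the-envelope illustration (assumed sizes, not authoritative;
 * both structs depend on the architecture and config): with 4K pages,
 * a ~16-byte struct ftrace_page header and a ~32-byte struct
 * dyn_ftrace, ENTRIES_PER_PAGE comes out to roughly
 * (4096 - 16) / 32 = 127 records per page, so the ~10000 boot-time
 * records estimated below need on the order of 80 pages.
 */
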
/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                     \
    for (pg = ftrace_pages_start; pg; pg = pg->next) {      \
        int _____i;                                         \
        for (_____i = 0; _____i < pg->index; _____i++) {    \
            rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()     \
        }                               \
    }
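
/*
 * Illustrative sketch of how the iterator pair above is meant to be
 * used (hypothetical caller, not part of this file): note the goto
 * instead of a 'break', since two nested loops must be exited.
 *
 *     struct ftrace_page *pg;
 *     struct dyn_ftrace *rec;
 *
 *     mutex_lock(&ftrace_lock);
 *     do_for_each_ftrace_rec(pg, rec) {
 *             if (rec->flags & FTRACE_FL_FREE)
 *                     continue;       // skipping one record is fine
 *             if (rec->ip == target_ip)
 *                     goto out;       // but never 'break' out
 *     } while_for_each_ftrace_rec();
 * out:
 *     mutex_unlock(&ftrace_lock);
 */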

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
    if (!(rec->flags & FTRACE_FL_FROZEN)) {
        rec->flags |= FTRACE_FL_FROZEN;
        frozen_record_count++;
    }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
    if (rec->flags & FTRACE_FL_FROZEN) {
        rec->flags &= ~FTRACE_FL_FROZEN;
        frozen_record_count--;
    }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
    return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec) ({ 0; })
# define unfreeze_record(rec) ({ 0; })
# define record_frozen(rec) ({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
    rec->ip = (unsigned long)ftrace_free_records;
    ftrace_free_records = rec;
    rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
    struct dyn_ftrace *rec;
    struct ftrace_page *pg;
    unsigned long s = (unsigned long)start;
    unsigned long e = s + size;

    if (ftrace_disabled || !start)
        return;

    mutex_lock(&ftrace_lock);
    do_for_each_ftrace_rec(pg, rec) {
        if ((rec->ip >= s) && (rec->ip < e))
            ftrace_free_rec(rec);
    } while_for_each_ftrace_rec();
    mutex_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
    struct dyn_ftrace *rec;

    /* First check for freed records */
    if (ftrace_free_records) {
        rec = ftrace_free_records;

        if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
            FTRACE_WARN_ON_ONCE(1);
            ftrace_free_records = NULL;
            return NULL;
        }

        ftrace_free_records = (void *)rec->ip;
        memset(rec, 0, sizeof(*rec));
        return rec;
    }

    if (ftrace_pages->index == ENTRIES_PER_PAGE) {
        if (!ftrace_pages->next) {
            /* allocate another page */
            ftrace_pages->next =
                (void *)get_zeroed_page(GFP_KERNEL);
            if (!ftrace_pages->next)
                return NULL;
        }
        ftrace_pages = ftrace_pages->next;
    }

    return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
    struct dyn_ftrace *rec;

    if (ftrace_disabled)
        return NULL;

    rec = ftrace_alloc_dyn_node(ip);
    if (!rec)
        return NULL;

    rec->ip = ip;

    list_add(&rec->list, &ftrace_new_addrs);

    return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
    int i;

    printk(KERN_CONT "%s", fmt);

    for (i = 0; i < MCOUNT_INSN_SIZE; i++)
        printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
    switch (failed) {
    case -EFAULT:
        FTRACE_WARN_ON_ONCE(1);
        pr_info("ftrace faulted on modifying ");
        print_ip_sym(ip);
        break;
    case -EINVAL:
        FTRACE_WARN_ON_ONCE(1);
        pr_info("ftrace failed to modify ");
        print_ip_sym(ip);
        print_ip_ins(" actual: ", (unsigned char *)ip);
        printk(KERN_CONT "\n");
        break;
    case -EPERM:
        FTRACE_WARN_ON_ONCE(1);
        pr_info("ftrace faulted on writing ");
        print_ip_sym(ip);
        break;
    default:
        FTRACE_WARN_ON_ONCE(1);
        pr_info("ftrace faulted on unknown error ");
        print_ip_sym(ip);
    }
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
    unsigned long ip, fl;
    unsigned long ftrace_addr;

    ftrace_addr = (unsigned long)FTRACE_ADDR;

    ip = rec->ip;

    /*
     * If this record is not to be traced and
     * it is not enabled then do nothing.
     *
     * If this record is not to be traced and
     * it is enabled then disable it.
     */
    if (rec->flags & FTRACE_FL_NOTRACE) {
        if (rec->flags & FTRACE_FL_ENABLED)
            rec->flags &= ~FTRACE_FL_ENABLED;
        else
            return 0;

    } else if (ftrace_filtered && enable) {
        /*
         * Filtering is on:
         */

        fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

        /* Record is filtered and enabled, do nothing */
        if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
            return 0;

        /* Record is not filtered or enabled, do nothing */
        if (!fl)
            return 0;

        /* Record is not filtered but enabled, disable it */
        if (fl == FTRACE_FL_ENABLED)
            rec->flags &= ~FTRACE_FL_ENABLED;
        else
            /* Otherwise record is filtered but not enabled, enable it */
            rec->flags |= FTRACE_FL_ENABLED;
    } else {
        /* Disable or not filtered */

        if (enable) {
            /* if record is enabled, do nothing */
            if (rec->flags & FTRACE_FL_ENABLED)
                return 0;

            rec->flags |= FTRACE_FL_ENABLED;

        } else {

            /* if record is not enabled, do nothing */
            if (!(rec->flags & FTRACE_FL_ENABLED))
                return 0;

            rec->flags &= ~FTRACE_FL_ENABLED;
        }
    }

    if (rec->flags & FTRACE_FL_ENABLED)
        return ftrace_make_call(rec, ftrace_addr);
    else
        return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
    int failed;
    struct dyn_ftrace *rec;
    struct ftrace_page *pg;

    do_for_each_ftrace_rec(pg, rec) {
        /*
         * Skip over free records and records that have
         * failed.
         */
        if (rec->flags & FTRACE_FL_FREE ||
            rec->flags & FTRACE_FL_FAILED)
            continue;

        /* ignore updates to this record's mcount site */
        if (get_kprobe((void *)rec->ip)) {
            freeze_record(rec);
            continue;
        } else {
            unfreeze_record(rec);
        }

        failed = __ftrace_replace_code(rec, enable);
        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
            rec->flags |= FTRACE_FL_FAILED;
            if ((system_state == SYSTEM_BOOTING) ||
                !core_kernel_text(rec->ip)) {
                ftrace_free_rec(rec);
            } else
                ftrace_bug(failed, rec->ip);
        }
    } while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
    unsigned long ip;
    int ret;

    ip = rec->ip;

    ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
    if (ret) {
        ftrace_bug(ret, ip);
        rec->flags |= FTRACE_FL_FAILED;
        return 0;
    }
    return 1;
}

static int __ftrace_modify_code(void *data)
{
    int *command = data;

    if (*command & FTRACE_ENABLE_CALLS)
        ftrace_replace_code(1);
    else if (*command & FTRACE_DISABLE_CALLS)
        ftrace_replace_code(0);

    if (*command & FTRACE_UPDATE_TRACE_FUNC)
        ftrace_update_ftrace_func(ftrace_trace_function);

    if (*command & FTRACE_START_FUNC_RET)
        ftrace_enable_ftrace_graph_caller();
    else if (*command & FTRACE_STOP_FUNC_RET)
        ftrace_disable_ftrace_graph_caller();

    return 0;
}

static void ftrace_run_update_code(int command)
{
    stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
    if (saved_ftrace_func != ftrace_trace_function) {
        saved_ftrace_func = ftrace_trace_function;
        command |= FTRACE_UPDATE_TRACE_FUNC;
    }

    if (!command || !ftrace_enabled)
        return;

    ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
    if (unlikely(ftrace_disabled))
        return;

    ftrace_start_up++;
    command |= FTRACE_ENABLE_CALLS;

    ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
    if (unlikely(ftrace_disabled))
        return;

    ftrace_start_up--;
    if (!ftrace_start_up)
        command |= FTRACE_DISABLE_CALLS;

    if (saved_ftrace_func != ftrace_trace_function) {
        saved_ftrace_func = ftrace_trace_function;
        command |= FTRACE_UPDATE_TRACE_FUNC;
    }

    if (!command || !ftrace_enabled)
        return;

    ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
    int command = FTRACE_ENABLE_MCOUNT;

    if (unlikely(ftrace_disabled))
        return;

    /* Force update next time */
    saved_ftrace_func = NULL;
    /* ftrace_start_up is true if we want ftrace running */
    if (ftrace_start_up)
        command |= FTRACE_ENABLE_CALLS;

    ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
    int command = FTRACE_DISABLE_MCOUNT;

    if (unlikely(ftrace_disabled))
        return;

    /* ftrace_start_up is true if ftrace is running */
    if (ftrace_start_up)
        command |= FTRACE_DISABLE_CALLS;

    ftrace_run_update_code(command);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
    struct dyn_ftrace *p, *t;
    cycle_t start, stop;

    start = ftrace_now(raw_smp_processor_id());
    ftrace_update_cnt = 0;

    list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

        /* If something went wrong, bail without enabling anything */
        if (unlikely(ftrace_disabled))
            return -1;

        list_del_init(&p->list);

        /* convert record (i.e., patch mcount-call with NOP) */
        if (ftrace_code_disable(mod, p)) {
            p->flags |= FTRACE_FL_CONVERTED;
            ftrace_update_cnt++;
        } else
            ftrace_free_rec(p);
    }

    stop = ftrace_now(raw_smp_processor_id());
    ftrace_update_time = stop - start;
    ftrace_update_tot_cnt += ftrace_update_cnt;

    return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
    struct ftrace_page *pg;
    int cnt;
    int i;

    /* allocate a few pages */
    ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
    if (!ftrace_pages_start)
        return -1;

    /*
     * Allocate a few more pages.
     *
     * TODO: have some parser search vmlinux before
     *  final linking to find all calls to ftrace.
     *  Then we can:
     *   a) know how many pages to allocate.
     *      and/or
     *   b) set up the table then.
     *
     * The dynamic code is still necessary for
     * modules.
     */

    pg = ftrace_pages = ftrace_pages_start;

    cnt = num_to_init / ENTRIES_PER_PAGE;
    pr_info("ftrace: allocating %ld entries in %d pages\n",
        num_to_init, cnt + 1);

    for (i = 0; i < cnt; i++) {
        pg->next = (void *)get_zeroed_page(GFP_KERNEL);

        /* If we fail, we'll try later anyway */
        if (!pg->next)
            break;

        pg = pg->next;
    }

    return 0;
}

enum {
    FTRACE_ITER_FILTER = (1 << 0),
    FTRACE_ITER_CONT = (1 << 1),
    FTRACE_ITER_NOTRACE = (1 << 2),
    FTRACE_ITER_FAILURES = (1 << 3),
    FTRACE_ITER_PRINTALL = (1 << 4),
    FTRACE_ITER_HASH = (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
    struct ftrace_page *pg;
    int hidx;
    int idx;
    unsigned flags;
    unsigned char buffer[FTRACE_BUFF_MAX+1];
    unsigned buffer_idx;
    unsigned filtered;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct ftrace_iterator *iter = m->private;
    struct hlist_node *hnd = v;
    struct hlist_head *hhd;

    WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

    (*pos)++;

 retry:
    if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
        return NULL;

    hhd = &ftrace_func_hash[iter->hidx];

    if (hlist_empty(hhd)) {
        iter->hidx++;
        hnd = NULL;
        goto retry;
    }

    if (!hnd)
        hnd = hhd->first;
    else {
        hnd = hnd->next;
        if (!hnd) {
            iter->hidx++;
            goto retry;
        }
    }

    return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
    struct ftrace_iterator *iter = m->private;
    void *p = NULL;

    iter->flags |= FTRACE_ITER_HASH;

    return t_hash_next(m, p, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
    struct ftrace_func_hook *rec;
    struct hlist_node *hnd = v;
    char str[KSYM_SYMBOL_LEN];

    rec = hlist_entry(hnd, struct ftrace_func_hook, node);

    kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
    seq_printf(m, "%s:", str);

    kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
    seq_printf(m, "%s", str);

    if (rec->data)
        seq_printf(m, ":%p", rec->data);
    seq_putc(m, '\n');

    return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct ftrace_iterator *iter = m->private;
    struct dyn_ftrace *rec = NULL;

    if (iter->flags & FTRACE_ITER_HASH)
        return t_hash_next(m, v, pos);

    (*pos)++;

    if (iter->flags & FTRACE_ITER_PRINTALL)
        return NULL;

 retry:
    if (iter->idx >= iter->pg->index) {
        if (iter->pg->next) {
            iter->pg = iter->pg->next;
            iter->idx = 0;
            goto retry;
        } else {
            iter->idx = -1;
        }
    } else {
        rec = &iter->pg->records[iter->idx++];
        if ((rec->flags & FTRACE_FL_FREE) ||

            (!(iter->flags & FTRACE_ITER_FAILURES) &&
             (rec->flags & FTRACE_FL_FAILED)) ||

            ((iter->flags & FTRACE_ITER_FAILURES) &&
             !(rec->flags & FTRACE_FL_FAILED)) ||

            ((iter->flags & FTRACE_ITER_FILTER) &&
             !(rec->flags & FTRACE_FL_FILTER)) ||

            ((iter->flags & FTRACE_ITER_NOTRACE) &&
             !(rec->flags & FTRACE_FL_NOTRACE))) {
            rec = NULL;
            goto retry;
        }
    }

    return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
    struct ftrace_iterator *iter = m->private;
    void *p = NULL;

    mutex_lock(&ftrace_lock);
    /*
     * For set_ftrace_filter reading, if we have the filter
     * off, we can short cut and just print out that all
     * functions are enabled.
     */
    if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
        if (*pos > 0)
            return t_hash_start(m, pos);
        iter->flags |= FTRACE_ITER_PRINTALL;
        (*pos)++;
        return iter;
    }

    if (iter->flags & FTRACE_ITER_HASH)
        return t_hash_start(m, pos);

    if (*pos > 0) {
        if (iter->idx < 0)
            return p;
        (*pos)--;
        iter->idx--;
    }

    p = t_next(m, p, pos);

    if (!p)
        return t_hash_start(m, pos);

    return p;
}

static void t_stop(struct seq_file *m, void *p)
{
    mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
    struct ftrace_iterator *iter = m->private;
    struct dyn_ftrace *rec = v;
    char str[KSYM_SYMBOL_LEN];

    if (iter->flags & FTRACE_ITER_HASH)
        return t_hash_show(m, v);

    if (iter->flags & FTRACE_ITER_PRINTALL) {
        seq_printf(m, "#### all functions enabled ####\n");
        return 0;
    }

    if (!rec)
        return 0;

    kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

    seq_printf(m, "%s\n", str);

    return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
    .start = t_start,
    .next = t_next,
    .stop = t_stop,
    .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
    struct ftrace_iterator *iter;
    int ret;

    if (unlikely(ftrace_disabled))
        return -ENODEV;

    iter = kzalloc(sizeof(*iter), GFP_KERNEL);
    if (!iter)
        return -ENOMEM;

    iter->pg = ftrace_pages_start;

    ret = seq_open(file, &show_ftrace_seq_ops);
    if (!ret) {
        struct seq_file *m = file->private_data;

        m->private = iter;
    } else {
        kfree(iter);
    }

    return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
    struct seq_file *m = (struct seq_file *)file->private_data;
    struct ftrace_iterator *iter = m->private;

    seq_release(inode, file);
    kfree(iter);

    return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
    int ret;
    struct seq_file *m;
    struct ftrace_iterator *iter;

    ret = ftrace_avail_open(inode, file);
    if (!ret) {
        m = (struct seq_file *)file->private_data;
        iter = (struct ftrace_iterator *)m->private;
        iter->flags = FTRACE_ITER_FAILURES;
    }

    return ret;
}

static void ftrace_filter_reset(int enable)
{
    struct ftrace_page *pg;
    struct dyn_ftrace *rec;
    unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

    mutex_lock(&ftrace_lock);
    if (enable)
        ftrace_filtered = 0;
    do_for_each_ftrace_rec(pg, rec) {
        if (rec->flags & FTRACE_FL_FAILED)
            continue;
        rec->flags &= ~type;
    } while_for_each_ftrace_rec();
    mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
    struct ftrace_iterator *iter;
    int ret = 0;

    if (unlikely(ftrace_disabled))
        return -ENODEV;

    iter = kzalloc(sizeof(*iter), GFP_KERNEL);
    if (!iter)
        return -ENOMEM;

    mutex_lock(&ftrace_regex_lock);
    if ((file->f_mode & FMODE_WRITE) &&
        !(file->f_flags & O_APPEND))
        ftrace_filter_reset(enable);

    if (file->f_mode & FMODE_READ) {
        iter->pg = ftrace_pages_start;
        iter->flags = enable ? FTRACE_ITER_FILTER :
            FTRACE_ITER_NOTRACE;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
            struct seq_file *m = file->private_data;
            m->private = iter;
        } else
            kfree(iter);
    } else
        file->private_data = iter;
    mutex_unlock(&ftrace_regex_lock);

    return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
    return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
    return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
          size_t cnt, loff_t *ppos)
{
    if (file->f_mode & FMODE_READ)
        return seq_read(file, ubuf, cnt, ppos);
    else
        return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
    loff_t ret;

    if (file->f_mode & FMODE_READ)
        ret = seq_lseek(file, offset, origin);
    else
        file->f_pos = ret = 1;

    return ret;
}

enum {
    MATCH_FULL,
    MATCH_FRONT_ONLY,
    MATCH_MIDDLE_ONLY,
    MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *      0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
    int type = MATCH_FULL;
    int i;

    if (buff[0] == '!') {
        *not = 1;
        buff++;
        len--;
    } else
        *not = 0;

    *search = buff;

    for (i = 0; i < len; i++) {
        if (buff[i] == '*') {
            if (!i) {
                *search = buff + 1;
                type = MATCH_END_ONLY;
            } else {
                if (type == MATCH_END_ONLY)
                    type = MATCH_MIDDLE_ONLY;
                else
                    type = MATCH_FRONT_ONLY;
                buff[i] = 0;
                break;
            }
        }
    }

    return type;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
    int matched = 0;
    char *ptr;

    switch (type) {
    case MATCH_FULL:
        if (strcmp(str, regex) == 0)
            matched = 1;
        break;
    case MATCH_FRONT_ONLY:
        if (strncmp(str, regex, len) == 0)
            matched = 1;
        break;
    case MATCH_MIDDLE_ONLY:
        if (strstr(str, regex))
            matched = 1;
        break;
    case MATCH_END_ONLY:
        ptr = strstr(str, regex);
        if (ptr && (ptr[len] == 0))
            matched = 1;
        break;
    }

    return matched;
}

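/*
 * Worked example of the two helpers above (values assumed, shown only
 * for illustration). For buff = "sched_*":
 *
 *     char *search;
 *     int not;
 *     int type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
 *     // type == MATCH_FRONT_ONLY, search == "sched_", not == 0,
 *     // and buff now holds "sched_" (the '*' was replaced by '\0').
 *
 *     ftrace_match("sched_switch", search, strlen(search), type);   // 1
 *     ftrace_match("pick_next_task", search, strlen(search), type); // 0
 *
 * "*_switch" would yield MATCH_END_ONLY with search == "_switch",
 * and a '*' on both ends yields MATCH_MIDDLE_ONLY.
 */
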
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
    char str[KSYM_SYMBOL_LEN];

    kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
    return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
    char *search;
    struct ftrace_page *pg;
    struct dyn_ftrace *rec;
    int type;
    unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
    unsigned search_len;
    int not;

    type = ftrace_setup_glob(buff, len, &search, &not);

    search_len = strlen(search);

    mutex_lock(&ftrace_lock);
    do_for_each_ftrace_rec(pg, rec) {

        if (rec->flags & FTRACE_FL_FAILED)
            continue;

        if (ftrace_match_record(rec, search, search_len, type)) {
            if (not)
                rec->flags &= ~flag;
            else
                rec->flags |= flag;
        }
        /*
         * Only enable filtering if we have a function that
         * is filtered on.
         */
        if (enable && (rec->flags & FTRACE_FL_FILTER))
            ftrace_filtered = 1;
    } while_for_each_ftrace_rec();
    mutex_unlock(&ftrace_lock);
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
               char *regex, int len, int type)
{
    char str[KSYM_SYMBOL_LEN];
    char *modname;

    kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

    if (!modname || strcmp(modname, mod))
        return 0;

    /* blank search means to match all funcs in the mod */
    if (len)
        return ftrace_match(str, regex, len, type);
    else
        return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
    char *search = buff;
    struct ftrace_page *pg;
    struct dyn_ftrace *rec;
    int type = MATCH_FULL;
    unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
    unsigned search_len = 0;
    int not = 0;

    /* blank or '*' mean the same */
    if (strcmp(buff, "*") == 0)
        buff[0] = 0;

    /* handle the case of 'dont filter this module' */
    if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
        buff[0] = 0;
        not = 1;
    }

    if (strlen(buff)) {
        type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
        search_len = strlen(search);
    }

    mutex_lock(&ftrace_lock);
    do_for_each_ftrace_rec(pg, rec) {

        if (rec->flags & FTRACE_FL_FAILED)
            continue;

        if (ftrace_match_module_record(rec, mod,
                           search, search_len, type)) {
            if (not)
                rec->flags &= ~flag;
            else
                rec->flags |= flag;
        }
        if (enable && (rec->flags & FTRACE_FL_FILTER))
            ftrace_filtered = 1;

    } while_for_each_ftrace_rec();
    mutex_unlock(&ftrace_lock);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
    char *mod;

    /*
     * cmd == 'mod' because we only registered this func
     * for the 'mod' ftrace_func_command.
     * But if you register one func with multiple commands,
     * you can tell which command was used by the cmd
     * parameter.
     */

    /* we must have a module name */
    if (!param)
        return -EINVAL;

    mod = strsep(&param, ":");
    if (!strlen(mod))
        return -EINVAL;

    ftrace_match_module_records(func, mod, enable);
    return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
    .name = "mod",
    .func = ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
    return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);

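/*
 * A minimal sketch of registering a second command, following the
 * template above. Everything here (the "dump" name and the callback
 * body) is hypothetical and only illustrates the ftrace_func_command
 * hooks; a real command would do something useful in its func().
 *
 *     static int
 *     ftrace_dump_callback(char *func, char *cmd, char *param, int enable)
 *     {
 *             // func  == the glob before the first ':'
 *             // cmd   == "dump", param == anything after "dump:"
 *             return 0;
 *     }
 *
 *     static struct ftrace_func_command ftrace_dump_cmd = {
 *             .name = "dump",
 *             .func = ftrace_dump_callback,
 *     };
 *
 *     static int __init ftrace_dump_cmd_init(void)
 *     {
 *             return register_ftrace_command(&ftrace_dump_cmd);
 *     }
 *     device_initcall(ftrace_dump_cmd_init);
 */
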
static void
function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
{
    struct ftrace_func_hook *entry;
    struct hlist_head *hhd;
    struct hlist_node *n;
    unsigned long key;
    int resched;

    key = hash_long(ip, FTRACE_HASH_BITS);

    hhd = &ftrace_func_hash[key];

    if (hlist_empty(hhd))
        return;

    /*
     * Disable preemption for these calls to prevent a RCU grace
     * period. This syncs the hash iteration and freeing of items
     * on the hash. rcu_read_lock is too dangerous here.
     */
    resched = ftrace_preempt_disable();
    hlist_for_each_entry_rcu(entry, n, hhd, node) {
        if (entry->ip == ip)
            entry->ops->func(ip, parent_ip, &entry->data);
    }
    ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_hook_ops __read_mostly =
{
    .func = function_trace_hook_call,
};

static int ftrace_hook_registered;

static void __enable_ftrace_function_hook(void)
{
    int i;

    if (ftrace_hook_registered)
        return;

    for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
        struct hlist_head *hhd = &ftrace_func_hash[i];
        if (hhd->first)
            break;
    }
    /* Nothing registered? */
    if (i == FTRACE_FUNC_HASHSIZE)
        return;

    __register_ftrace_function(&trace_hook_ops);
    ftrace_startup(0);
    ftrace_hook_registered = 1;
}

static void __disable_ftrace_function_hook(void)
{
    int i;

    if (!ftrace_hook_registered)
        return;

    for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
        struct hlist_head *hhd = &ftrace_func_hash[i];
        if (hhd->first)
            return;
    }

    /* no more funcs left */
    __unregister_ftrace_function(&trace_hook_ops);
    ftrace_shutdown(0);
    ftrace_hook_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
    struct ftrace_func_hook *entry =
        container_of(rhp, struct ftrace_func_hook, rcu);

    if (entry->ops->free)
        entry->ops->free(&entry->data);
    kfree(entry);
}

int
register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
                  void *data)
{
    struct ftrace_func_hook *entry;
    struct ftrace_page *pg;
    struct dyn_ftrace *rec;
    unsigned long key;
    int type, len, not;
    int count = 0;
    char *search;

    type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
    len = strlen(search);

    /* we do not support '!' for function hooks */
    if (WARN_ON(not))
        return -EINVAL;

    mutex_lock(&ftrace_lock);
    do_for_each_ftrace_rec(pg, rec) {

        if (rec->flags & FTRACE_FL_FAILED)
            continue;

        if (!ftrace_match_record(rec, search, len, type))
            continue;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
            /* If we did not hook to any, then return error */
            if (!count)
                count = -ENOMEM;
            goto out_unlock;
        }

        count++;

        entry->data = data;

        /*
         * The caller might want to do something special
         * for each function we find. We call the callback
         * to give the caller an opportunity to do so.
         */
        if (ops->callback) {
            if (ops->callback(rec->ip, &entry->data) < 0) {
                /* caller does not like this func */
                kfree(entry);
                continue;
            }
        }

        entry->ops = ops;
        entry->ip = rec->ip;

        key = hash_long(entry->ip, FTRACE_HASH_BITS);
        hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

    } while_for_each_ftrace_rec();
    __enable_ftrace_function_hook();

 out_unlock:
    mutex_unlock(&ftrace_lock);

    return count;
}

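/*
 * Hypothetical usage sketch for the hook API above (the ops instance
 * and callback names are made up for illustration). This would arm
 * my_hook_func() to be called from the function tracer whenever a
 * function matching "vfs_*" executes:
 *
 *     static void my_hook_func(unsigned long ip, unsigned long parent_ip,
 *                              void **data)
 *     {
 *             // called from the function tracer; keep it light
 *     }
 *
 *     static struct ftrace_hook_ops my_hook_ops = {
 *             .func = my_hook_func,
 *     };
 *
 *     ret = register_ftrace_function_hook("vfs_*", &my_hook_ops, NULL);
 *     // ret > 0: number of functions hooked
 *     ...
 *     unregister_ftrace_function_hook("vfs_*", &my_hook_ops, NULL);
 */
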
enum {
    HOOK_TEST_FUNC = 1,
    HOOK_TEST_DATA = 2
};

static void
__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
                  void *data, int flags)
{
    struct ftrace_func_hook *entry;
    struct hlist_node *n, *tmp;
    char str[KSYM_SYMBOL_LEN];
    int type = MATCH_FULL;
    int i, len = 0;
    char *search;

    if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
        glob = NULL;
    else {
        int not;

        type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
        len = strlen(search);

        /* we do not support '!' for function hooks */
        if (WARN_ON(not))
            return;
    }

    mutex_lock(&ftrace_lock);
    for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
        struct hlist_head *hhd = &ftrace_func_hash[i];

        hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

            /* break up if statements for readability */
            if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
                continue;

            if ((flags & HOOK_TEST_DATA) && entry->data != data)
                continue;

            /* do this last, since it is the most expensive */
            if (glob) {
                kallsyms_lookup(entry->ip, NULL, NULL,
                        NULL, str);
                if (!ftrace_match(str, glob, len, type))
                    continue;
            }

            hlist_del(&entry->node);
            call_rcu(&entry->rcu, ftrace_free_entry_rcu);
        }
    }
    __disable_ftrace_function_hook();
    mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
                void *data)
{
    __unregister_ftrace_function_hook(glob, ops, data,
                      HOOK_TEST_FUNC | HOOK_TEST_DATA);
}

void
unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
{
    __unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
}

void unregister_ftrace_function_hook_all(char *glob)
{
    __unregister_ftrace_function_hook(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
    struct ftrace_func_command *p;
    int ret = 0;

    mutex_lock(&ftrace_cmd_mutex);
    list_for_each_entry(p, &ftrace_commands, list) {
        if (strcmp(cmd->name, p->name) == 0) {
            ret = -EBUSY;
            goto out_unlock;
        }
    }
    list_add(&cmd->list, &ftrace_commands);
 out_unlock:
    mutex_unlock(&ftrace_cmd_mutex);

    return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
    struct ftrace_func_command *p, *n;
    int ret = -ENODEV;

    mutex_lock(&ftrace_cmd_mutex);
    list_for_each_entry_safe(p, n, &ftrace_commands, list) {
        if (strcmp(cmd->name, p->name) == 0) {
            ret = 0;
            list_del_init(&p->list);
            goto out_unlock;
        }
    }
 out_unlock:
    mutex_unlock(&ftrace_cmd_mutex);

    return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
    struct ftrace_func_command *p;
    char *func, *command, *next = buff;
    int ret = -EINVAL;

    func = strsep(&next, ":");

    if (!next) {
        ftrace_match_records(func, len, enable);
        return 0;
    }

    /* command found */

    command = strsep(&next, ":");

    mutex_lock(&ftrace_cmd_mutex);
    list_for_each_entry(p, &ftrace_commands, list) {
        if (strcmp(p->name, command) == 0) {
            ret = p->func(func, command, next, enable);
            goto out_unlock;
        }
    }
 out_unlock:
    mutex_unlock(&ftrace_cmd_mutex);

    return ret;
}

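/*
 * What this parsing gives the user, by example (writes to the
 * set_ftrace_filter debugfs file):
 *
 *     echo 'schedule' > set_ftrace_filter        # plain glob, no ':'
 *     echo '*:mod:ext3' > set_ftrace_filter      # run the "mod" command
 *
 * The second form splits into func == "*", command == "mod" and
 * param == "ext3", and is dispatched to ftrace_mod_callback() above
 * via the registered command list.
 */
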
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
           size_t cnt, loff_t *ppos, int enable)
{
    struct ftrace_iterator *iter;
    char ch;
    size_t read = 0;
    ssize_t ret;

    if (!cnt || cnt < 0)
        return 0;

    mutex_lock(&ftrace_regex_lock);

    if (file->f_mode & FMODE_READ) {
        struct seq_file *m = file->private_data;
        iter = m->private;
    } else
        iter = file->private_data;

    if (!*ppos) {
        iter->flags &= ~FTRACE_ITER_CONT;
        iter->buffer_idx = 0;
    }

    ret = get_user(ch, ubuf++);
    if (ret)
        goto out;
    read++;
    cnt--;

    if (!(iter->flags & ~FTRACE_ITER_CONT)) {
        /* skip white space */
        while (cnt && isspace(ch)) {
            ret = get_user(ch, ubuf++);
            if (ret)
                goto out;
            read++;
            cnt--;
        }

        if (isspace(ch)) {
            file->f_pos += read;
            ret = read;
            goto out;
        }

        iter->buffer_idx = 0;
    }

    while (cnt && !isspace(ch)) {
        if (iter->buffer_idx < FTRACE_BUFF_MAX)
            iter->buffer[iter->buffer_idx++] = ch;
        else {
            ret = -EINVAL;
            goto out;
        }
        ret = get_user(ch, ubuf++);
        if (ret)
            goto out;
        read++;
        cnt--;
    }

    if (isspace(ch)) {
        iter->filtered++;
        iter->buffer[iter->buffer_idx] = 0;
        ret = ftrace_process_regex(iter->buffer,
                       iter->buffer_idx, enable);
        if (ret)
            goto out;
        iter->buffer_idx = 0;
    } else
        iter->flags |= FTRACE_ITER_CONT;

    file->f_pos += read;

    ret = read;
 out:
    mutex_unlock(&ftrace_regex_lock);

    return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
            size_t cnt, loff_t *ppos)
{
    return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
             size_t cnt, loff_t *ppos)
{
    return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
    if (unlikely(ftrace_disabled))
        return;

    mutex_lock(&ftrace_regex_lock);
    if (reset)
        ftrace_filter_reset(enable);
    if (buf)
        ftrace_match_records(buf, len, enable);
    mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
    ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
    ftrace_set_regex(buf, len, reset, 0);
}

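/*
 * Example (in-kernel caller, illustrative only): limit tracing to the
 * scheduler wakeup path before enabling the function tracer.
 *
 *     ftrace_set_filter("try_to_wake_up", strlen("try_to_wake_up"), 1);
 *
 * The reset argument of 1 clears any previously set filters first;
 * globs such as "sched_*" work here exactly as they do when written
 * to the set_ftrace_filter debugfs file.
 */
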
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
    struct seq_file *m = (struct seq_file *)file->private_data;
    struct ftrace_iterator *iter;

    mutex_lock(&ftrace_regex_lock);
    if (file->f_mode & FMODE_READ) {
        iter = m->private;

        seq_release(inode, file);
    } else
        iter = file->private_data;

    if (iter->buffer_idx) {
        iter->filtered++;
        iter->buffer[iter->buffer_idx] = 0;
        ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
    }

    mutex_lock(&ftrace_lock);
    if (ftrace_start_up && ftrace_enabled)
        ftrace_run_update_code(FTRACE_ENABLE_CALLS);
    mutex_unlock(&ftrace_lock);

    kfree(iter);
    mutex_unlock(&ftrace_regex_lock);
    return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
    return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
    return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
    .open = ftrace_avail_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
    .open = ftrace_failures_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
    .open = ftrace_filter_open,
    .read = ftrace_regex_read,
    .write = ftrace_filter_write,
    .llseek = ftrace_regex_lseek,
    .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
    .open = ftrace_notrace_open,
    .read = ftrace_regex_read,
    .write = ftrace_notrace_write,
    .llseek = ftrace_regex_lseek,
    .release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
    unsigned long *array = m->private;
    int index = *pos;

    (*pos)++;

    if (index >= ftrace_graph_count)
        return NULL;

    return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
    void *p = NULL;

    mutex_lock(&graph_lock);

    p = g_next(m, p, pos);

    return p;
}

static void g_stop(struct seq_file *m, void *p)
{
    mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
    unsigned long *ptr = v;
    char str[KSYM_SYMBOL_LEN];

    if (!ptr)
        return 0;

    kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

    seq_printf(m, "%s\n", str);

    return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
    .start = g_start,
    .next = g_next,
    .stop = g_stop,
    .show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
    int ret = 0;

    if (unlikely(ftrace_disabled))
        return -ENODEV;

    mutex_lock(&graph_lock);
    if ((file->f_mode & FMODE_WRITE) &&
        !(file->f_flags & O_APPEND)) {
        ftrace_graph_count = 0;
        memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
    }

    if (file->f_mode & FMODE_READ) {
        ret = seq_open(file, &ftrace_graph_seq_ops);
        if (!ret) {
            struct seq_file *m = file->private_data;
            m->private = ftrace_graph_funcs;
        }
    } else
        file->private_data = ftrace_graph_funcs;
    mutex_unlock(&graph_lock);

    return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
          size_t cnt, loff_t *ppos)
{
    if (file->f_mode & FMODE_READ)
        return seq_read(file, ubuf, cnt, ppos);
    else
        return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
    char str[KSYM_SYMBOL_LEN];
    struct dyn_ftrace *rec;
    struct ftrace_page *pg;
    int found = 0;
    int j;

    if (ftrace_disabled)
        return -ENODEV;

    mutex_lock(&ftrace_lock);
    do_for_each_ftrace_rec(pg, rec) {

        if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
            continue;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        if (strcmp(str, buffer) == 0) {
            /* Return 1 if we add it to the array */
            found = 1;
            for (j = 0; j < idx; j++)
                if (array[j] == rec->ip) {
                    found = 0;
                    break;
                }
            if (found)
                array[idx] = rec->ip;
            goto out;
        }
    } while_for_each_ftrace_rec();
 out:
    mutex_unlock(&ftrace_lock);

    return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
           size_t cnt, loff_t *ppos)
{
    unsigned char buffer[FTRACE_BUFF_MAX+1];
    unsigned long *array;
    size_t read = 0;
    ssize_t ret;
    int index = 0;
    char ch;

    if (!cnt || cnt < 0)
        return 0;

    mutex_lock(&graph_lock);

    if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
        ret = -EBUSY;
        goto out;
    }

    if (file->f_mode & FMODE_READ) {
        struct seq_file *m = file->private_data;
        array = m->private;
    } else
        array = file->private_data;

    ret = get_user(ch, ubuf++);
    if (ret)
        goto out;
    read++;
    cnt--;

    /* skip white space */
    while (cnt && isspace(ch)) {
        ret = get_user(ch, ubuf++);
        if (ret)
            goto out;
        read++;
        cnt--;
    }

    if (isspace(ch)) {
        *ppos += read;
        ret = read;
        goto out;
    }

    while (cnt && !isspace(ch)) {
        if (index < FTRACE_BUFF_MAX)
            buffer[index++] = ch;
        else {
            ret = -EINVAL;
            goto out;
        }
        ret = get_user(ch, ubuf++);
        if (ret)
            goto out;
        read++;
        cnt--;
    }
    buffer[index] = 0;

    /* we allow only one at a time */
    ret = ftrace_set_func(array, ftrace_graph_count, buffer);
    if (ret)
        goto out;

    ftrace_graph_count++;

    file->f_pos += read;

    ret = read;
 out:
    mutex_unlock(&graph_lock);

    return ret;
}

static const struct file_operations ftrace_graph_fops = {
    .open = ftrace_graph_open,
    .read = ftrace_graph_read,
    .write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
    struct dentry *entry;

    entry = debugfs_create_file("available_filter_functions", 0444,
                    d_tracer, NULL, &ftrace_avail_fops);
    if (!entry)
        pr_warning("Could not create debugfs "
               "'available_filter_functions' entry\n");

    entry = debugfs_create_file("failures", 0444,
                    d_tracer, NULL, &ftrace_failures_fops);
    if (!entry)
        pr_warning("Could not create debugfs 'failures' entry\n");

    entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                    NULL, &ftrace_filter_fops);
    if (!entry)
        pr_warning("Could not create debugfs "
               "'set_ftrace_filter' entry\n");

    entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                    NULL, &ftrace_notrace_fops);
    if (!entry)
        pr_warning("Could not create debugfs "
               "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
                    NULL,
                    &ftrace_graph_fops);
    if (!entry)
        pr_warning("Could not create debugfs "
               "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

    return 0;
}

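/*
 * The resulting control files live under the tracing debugfs
 * directory (typically /sys/kernel/debug/tracing). Typical usage from
 * userspace, for illustration:
 *
 *     cat available_filter_functions       # every traceable function
 *     echo 'sys_*' > set_ftrace_filter     # trace only these
 *     echo 'schedule' > set_ftrace_notrace # never trace this one
 *     cat failures                         # records ftrace gave up on
 */
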
static int ftrace_convert_nops(struct module *mod,
                   unsigned long *start,
                   unsigned long *end)
{
    unsigned long *p;
    unsigned long addr;
    unsigned long flags;

    mutex_lock(&ftrace_lock);
    p = start;
    while (p < end) {
        addr = ftrace_call_adjust(*p++);
        /*
         * Some architecture linkers will pad between
         * the different mcount_loc sections of different
         * object files to satisfy alignments.
         * Skip any NULL pointers.
         */
        if (!addr)
            continue;
        ftrace_record_ip(addr);
    }

    /* disable interrupts to prevent kstop machine */
    local_irq_save(flags);
    ftrace_update_code(mod);
    local_irq_restore(flags);
    mutex_unlock(&ftrace_lock);

    return 0;
}

2155void ftrace_init_module(struct module *mod,
2156 unsigned long *start, unsigned long *end)
2157{
2158 if (ftrace_disabled || start == end)
2159 return;
2160 ftrace_convert_nops(mod, start, end);
2161}
2162
2163extern unsigned long __start_mcount_loc[];
2164extern unsigned long __stop_mcount_loc[];
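/*
 * These two symbols bound the table of recorded mcount call sites:
 * every call site instrumented by 'gcc -pg' contributes one address
 * to the __mcount_loc section, and the linker script (MCOUNT_REC()
 * in include/asm-generic/vmlinux.lds.h) gathers the per-object
 * sections between __start_mcount_loc and __stop_mcount_loc.
 */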
2165
2166void __init ftrace_init(void)
2167{
2168 unsigned long count, addr, flags;
2169 int ret;
2170
2171 /* Keep the ftrace pointer to the stub */
2172 addr = (unsigned long)ftrace_stub;
2173
2174 local_irq_save(flags);
2175 ftrace_dyn_arch_init(&addr);
2176 local_irq_restore(flags);
2177
2178 /* ftrace_dyn_arch_init places the return code in addr */
2179 if (addr)
2180 goto failed;
2181
2182 count = __stop_mcount_loc - __start_mcount_loc;
2183
2184 ret = ftrace_dyn_table_alloc(count);
2185 if (ret)
2186 goto failed;
2187
2188 last_ftrace_enabled = ftrace_enabled = 1;
2189
2190 ret = ftrace_convert_nops(NULL,
2191 __start_mcount_loc,
2192 __stop_mcount_loc);
2193
2194 return;
2195 failed:
2196 ftrace_disabled = 1;
2197}
2198
2199#else
2200
2201static int __init ftrace_nodyn_init(void)
2202{
2203 ftrace_enabled = 1;
2204 return 0;
2205}
2206device_initcall(ftrace_nodyn_init);
2207
2208static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2209static inline void ftrace_startup_enable(int command) { }
2210/* Keep as macros so we do not need to define the commands */
2211# define ftrace_startup(command) do { } while (0)
2212# define ftrace_shutdown(command) do { } while (0)
2213# define ftrace_startup_sysctl() do { } while (0)
2214# define ftrace_shutdown_sysctl() do { } while (0)
2215#endif /* CONFIG_DYNAMIC_FTRACE */
2216
2217static ssize_t
2218ftrace_pid_read(struct file *file, char __user *ubuf,
2219 size_t cnt, loff_t *ppos)
2220{
2221 char buf[64];
2222 int r;
2223
2224 if (ftrace_pid_trace == ftrace_swapper_pid)
2225 r = sprintf(buf, "swapper tasks\n");
2226 else if (ftrace_pid_trace)
2227 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
2228 else
2229 r = sprintf(buf, "no pid\n");
2230
2231 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2232}
2233
2234static void clear_ftrace_swapper(void)
2235{
2236 struct task_struct *p;
2237 int cpu;
2238
2239 get_online_cpus();
2240 for_each_online_cpu(cpu) {
2241 p = idle_task(cpu);
2242 clear_tsk_trace_trace(p);
2243 }
2244 put_online_cpus();
2245}
2246
2247static void set_ftrace_swapper(void)
2248{
2249 struct task_struct *p;
2250 int cpu;
2251
2252 get_online_cpus();
2253 for_each_online_cpu(cpu) {
2254 p = idle_task(cpu);
2255 set_tsk_trace_trace(p);
2256 }
2257 put_online_cpus();
2258}
2259
2260static void clear_ftrace_pid(struct pid *pid)
2261{
2262 struct task_struct *p;
2263
2264 rcu_read_lock();
2265 do_each_pid_task(pid, PIDTYPE_PID, p) {
2266 clear_tsk_trace_trace(p);
2267 } while_each_pid_task(pid, PIDTYPE_PID, p);
2268 rcu_read_unlock();
2269
2270 put_pid(pid);
2271}
2272
2273static void set_ftrace_pid(struct pid *pid)
2274{
2275 struct task_struct *p;
2276
2277 rcu_read_lock();
2278 do_each_pid_task(pid, PIDTYPE_PID, p) {
2279 set_tsk_trace_trace(p);
2280 } while_each_pid_task(pid, PIDTYPE_PID, p);
2281 rcu_read_unlock();
2282}
2283
2284static void clear_ftrace_pid_task(struct pid **pid)
2285{
2286 if (*pid == ftrace_swapper_pid)
2287 clear_ftrace_swapper();
2288 else
2289 clear_ftrace_pid(*pid);
2290
2291 *pid = NULL;
2292}
2293
2294static void set_ftrace_pid_task(struct pid *pid)
2295{
2296 if (pid == ftrace_swapper_pid)
2297 set_ftrace_swapper();
2298 else
2299 set_ftrace_pid(pid);
2300}
2301
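/*
 * Note on the helpers above: ftrace_swapper_pid is a sentinel meaning
 * "trace the idle (swapper) tasks". Those per-cpu idle threads are not
 * reached through a pid's task list, which is why the swapper variants
 * walk the online CPUs with idle_task() instead.
 */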
2302static ssize_t
2303ftrace_pid_write(struct file *filp, const char __user *ubuf,
2304 size_t cnt, loff_t *ppos)
2305{
2306 struct pid *pid;
2307 char buf[64];
2308 long val;
2309 int ret;
2310
2311 if (cnt >= sizeof(buf))
2312 return -EINVAL;
2313
2314 if (copy_from_user(&buf, ubuf, cnt))
2315 return -EFAULT;
2316
2317 buf[cnt] = 0;
2318
2319 ret = strict_strtol(buf, 10, &val);
2320 if (ret < 0)
2321 return ret;
2322
2323 mutex_lock(&ftrace_lock);
2324 if (val < 0) {
2325 /* disable pid tracing */
2326 if (!ftrace_pid_trace)
2327 goto out;
2328
2329 clear_ftrace_pid_task(&ftrace_pid_trace);
2330
2331 } else {
2332 /* swapper task is special */
2333 if (!val) {
2334 pid = ftrace_swapper_pid;
2335 if (pid == ftrace_pid_trace)
2336 goto out;
2337 } else {
2338 pid = find_get_pid(val);
2339
2340 if (pid == ftrace_pid_trace) {
2341 put_pid(pid);
2342 goto out;
2343 }
2344 }
2345
2346 if (ftrace_pid_trace)
2347 clear_ftrace_pid_task(&ftrace_pid_trace);
2348
2349 if (!pid)
2350 goto out;
2351
2352 ftrace_pid_trace = pid;
2353
2354 set_ftrace_pid_task(ftrace_pid_trace);
2355 }
2356
2357 /* update the function call */
2358 ftrace_update_pid_func();
2359 ftrace_startup_enable(0);
2360
2361 out:
2362 mutex_unlock(&ftrace_lock);
2363
2364 return cnt;
2365}
2366
2367static struct file_operations ftrace_pid_fops = {
2368 .read = ftrace_pid_read,
2369 .write = ftrace_pid_write,
2370};
2371
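/*
 * Interface summary, as implemented by ftrace_pid_write() above:
 * writing a negative value disables pid filtering, writing "0"
 * selects the swapper/idle tasks, and writing a positive value
 * restricts tracing to that single pid.
 */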
2372static __init int ftrace_init_debugfs(void)
2373{
2374 struct dentry *d_tracer;
2375 struct dentry *entry;
2376
2377 d_tracer = tracing_init_dentry();
2378 if (!d_tracer)
2379 return 0;
2380
2381 ftrace_init_dyn_debugfs(d_tracer);
2382
2383 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2384 NULL, &ftrace_pid_fops);
2385 if (!entry)
2386 pr_warning("Could not create debugfs "
2387 "'set_ftrace_pid' entry\n");
2388 return 0;
2389}
2390
2391fs_initcall(ftrace_init_debugfs);
2392
2393/**
2394 * ftrace_kill - kill ftrace
2395 *
2396 * This function should be used by panic code. It stops ftrace
2397 * but in a not so nice way: ftrace_disabled is set permanently,
2398 * so tracing cannot be re-enabled afterwards.
2399 */
2400void ftrace_kill(void)
2401{
2402 ftrace_disabled = 1;
2403 ftrace_enabled = 0;
2404 clear_ftrace_function();
2405}
2406
2407/**
2408 * register_ftrace_function - register a function for profiling
2409 * @ops - ops structure that holds the function for profiling.
2410 *
2411 * Register a function to be called by all functions in the
2412 * kernel.
2413 *
2414 * Note: @ops->func and all the functions it calls must be labeled
2415 * with "notrace", otherwise it will go into a
2416 * recursive loop.
2417 */
2418int register_ftrace_function(struct ftrace_ops *ops)
2419{
2420 int ret;
2421
2422 if (unlikely(ftrace_disabled))
2423 return -1;
2424
2425 mutex_lock(&ftrace_lock);
2426
2427 ret = __register_ftrace_function(ops);
2428 ftrace_startup(0);
2429
2430 mutex_unlock(&ftrace_lock);
2431 return ret;
2432}
2433
2434/**
2435 * unregister_ftrace_function - unregister a function for profiling.
2436 * @ops - ops structure that holds the function to unregister
2437 *
2438 * Unregister a function that was added to be called by ftrace profiling.
2439 */
2440int unregister_ftrace_function(struct ftrace_ops *ops)
2441{
2442 int ret;
2443
2444 mutex_lock(&ftrace_lock);
2445 ret = __unregister_ftrace_function(ops);
2446 ftrace_shutdown(0);
2447 mutex_unlock(&ftrace_lock);
2448
2449 return ret;
2450}
2451
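A minimal sketch of a client of this pair (illustrative; the "my_" names are hypothetical). Per the note above, the callback is marked notrace so it cannot recurse into itself:

static notrace void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called at every traced function; ip is the callee,
	   parent_ip its caller; runs in any context, so never sleep here */
}

static struct ftrace_ops my_ops __read_mostly =
{
	.func = my_trace_func,
};

/* from module init:  register_ftrace_function(&my_ops);   */
/* from module exit:  unregister_ftrace_function(&my_ops); */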
2452int
2453ftrace_enable_sysctl(struct ctl_table *table, int write,
2454 struct file *file, void __user *buffer, size_t *lenp,
2455 loff_t *ppos)
2456{
2457 int ret;
2458
2459 if (unlikely(ftrace_disabled))
2460 return -ENODEV;
2461
2462 mutex_lock(&ftrace_lock);
2463
2464 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
2465
2466 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2467 goto out;
2468
2469 last_ftrace_enabled = ftrace_enabled;
2470
2471 if (ftrace_enabled) {
2472
2473 ftrace_startup_sysctl();
2474
2475 /* we are starting ftrace again */
2476 if (ftrace_list != &ftrace_list_end) {
2477 if (ftrace_list->next == &ftrace_list_end)
2478 ftrace_trace_function = ftrace_list->func;
2479 else
2480 ftrace_trace_function = ftrace_list_func;
2481 }
2482
2483 } else {
2484 /* stopping ftrace calls (just send to ftrace_stub) */
2485 ftrace_trace_function = ftrace_stub;
2486
2487 ftrace_shutdown_sysctl();
2488 }
2489
2490 out:
2491 mutex_unlock(&ftrace_lock);
2492 return ret;
2493}
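/*
 * This handler backs the "ftrace_enabled" sysctl (wired up in
 * kernel/sysctl.c, visible as /proc/sys/kernel/ftrace_enabled):
 * proc_dointvec() updates ftrace_enabled, and the code above swaps
 * ftrace_trace_function between the registered list and ftrace_stub.
 */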
2494
2495#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2496
2497static atomic_t ftrace_graph_active;
2498static struct notifier_block ftrace_suspend_notifier;
2499
2500int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2501{
2502 return 0;
2503}
2504
2505/* The callbacks that hook a function */
2506trace_func_graph_ret_t ftrace_graph_return =
2507 (trace_func_graph_ret_t)ftrace_stub;
2508trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2509
2510/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2511static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2512{
2513 int i;
2514 int ret = 0;
2515 unsigned long flags;
2516 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2517 struct task_struct *g, *t;
2518
2519 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2520 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2521 * sizeof(struct ftrace_ret_stack),
2522 GFP_KERNEL);
2523 if (!ret_stack_list[i]) {
2524 start = 0;
2525 end = i;
2526 ret = -ENOMEM;
2527 goto free;
2528 }
2529 }
2530
2531 read_lock_irqsave(&tasklist_lock, flags);
2532 do_each_thread(g, t) {
2533 if (start == end) {
2534 ret = -EAGAIN;
2535 goto unlock;
2536 }
2537
2538 if (t->ret_stack == NULL) {
2539 t->curr_ret_stack = -1;
2540 /* Make sure IRQs see the -1 first: */
2541 barrier();
2542 t->ret_stack = ret_stack_list[start++];
2543 atomic_set(&t->tracing_graph_pause, 0);
2544 atomic_set(&t->trace_overrun, 0);
2545 }
2546 } while_each_thread(g, t);
2547
2548unlock:
2549 read_unlock_irqrestore(&tasklist_lock, flags);
2550free:
2551 for (i = start; i < end; i++)
2552 kfree(ret_stack_list[i]);
2553 return ret;
2554}
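/*
 * If more tasks lacked a ret_stack than the batch of
 * FTRACE_RETSTACK_ALLOC_SIZE stacks could cover, the walk above bails
 * out with -EAGAIN, and the caller below retries with a fresh batch
 * until every task has been given a stack.
 */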
2555
2556/* Allocate a return stack for each task */
2557static int start_graph_tracing(void)
2558{
2559 struct ftrace_ret_stack **ret_stack_list;
2560 int ret;
2561
2562 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2563 sizeof(struct ftrace_ret_stack *),
2564 GFP_KERNEL);
2565
2566 if (!ret_stack_list)
2567 return -ENOMEM;
2568
2569 do {
2570 ret = alloc_retstack_tasklist(ret_stack_list);
2571 } while (ret == -EAGAIN);
2572
2573 kfree(ret_stack_list);
2574 return ret;
2575}
2576
2577/*
2578 * Hibernation protection.
2579 * The current task's state is too unstable during
2580 * suspend/restore to disk. We want to protect against that.
2581 */
2582static int
2583ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2584 void *unused)
2585{
2586 switch (state) {
2587 case PM_HIBERNATION_PREPARE:
2588 pause_graph_tracing();
2589 break;
2590
2591 case PM_POST_HIBERNATION:
2592 unpause_graph_tracing();
2593 break;
2594 }
2595 return NOTIFY_DONE;
2596}
2597
2598int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2599 trace_func_graph_ent_t entryfunc)
2600{
2601 int ret = 0;
2602
2603 mutex_lock(&ftrace_lock);
2604
2605 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2606 register_pm_notifier(&ftrace_suspend_notifier);
2607
2608 atomic_inc(&ftrace_graph_active);
2609 ret = start_graph_tracing();
2610 if (ret) {
2611 atomic_dec(&ftrace_graph_active);
2612 goto out;
2613 }
2614
2615 ftrace_graph_return = retfunc;
2616 ftrace_graph_entry = entryfunc;
2617
2618 ftrace_startup(FTRACE_START_FUNC_RET);
2619
2620out:
2621 mutex_unlock(&ftrace_lock);
2622 return ret;
2623}
2624
2625void unregister_ftrace_graph(void)
2626{
2627 mutex_lock(&ftrace_lock);
2628
2629 atomic_dec(&ftrace_graph_active);
2630 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2631 ftrace_graph_entry = ftrace_graph_entry_stub;
2632 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2633 unregister_pm_notifier(&ftrace_suspend_notifier);
2634
2635 mutex_unlock(&ftrace_lock);
2636}
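A minimal sketch of hooking the graph tracer through this pair (illustrative; the "my_" names are hypothetical):

static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero means: trace this function's entry */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* invoked as the traced function returns */
}

/* register_ftrace_graph(my_graph_return, my_graph_entry); */
/* ... and when done: unregister_ftrace_graph();           */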
2637
2638/* Allocate a return stack for a newly created task */
2639void ftrace_graph_init_task(struct task_struct *t)
2640{
2641 if (atomic_read(&ftrace_graph_active)) {
2642 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2643 * sizeof(struct ftrace_ret_stack),
2644 GFP_KERNEL);
2645 if (!t->ret_stack)
2646 return;
2647 t->curr_ret_stack = -1;
2648 atomic_set(&t->tracing_graph_pause, 0);
2649 atomic_set(&t->trace_overrun, 0);
2650 } else
2651 t->ret_stack = NULL;
2652}
2653
2654void ftrace_graph_exit_task(struct task_struct *t)
2655{
2656 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2657
2658 t->ret_stack = NULL;
2659 /* NULL must become visible to IRQs before we free it: */
2660 barrier();
2661
2662 kfree(ret_stack);
2663}
2664
2665void ftrace_graph_stop(void)
2666{
2667 ftrace_stop();
2668}
2669#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2670