]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - kernel/trace/ftrace.c
ftrace: use seq_read
[mirror_ubuntu-focal-kernel.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
f22f9a89 25#include <linux/kprobes.h>
2d8b820b 26#include <linux/ftrace.h>
b0fc494f 27#include <linux/sysctl.h>
5072c59f 28#include <linux/ctype.h>
3d083395 29#include <linux/list.h>
59df055f 30#include <linux/hash.h>
3d083395 31
395a59d0
AS
32#include <asm/ftrace.h>
33
3d083395 34#include "trace.h"
16444a8a 35
6912896e
SR
36#define FTRACE_WARN_ON(cond) \
37 do { \
38 if (WARN_ON(cond)) \
39 ftrace_kill(); \
40 } while (0)
41
42#define FTRACE_WARN_ON_ONCE(cond) \
43 do { \
44 if (WARN_ON_ONCE(cond)) \
45 ftrace_kill(); \
46 } while (0)
47
8fc0c701
SR
48/* hash bits for specific function selection */
49#define FTRACE_HASH_BITS 7
50#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
51
4eebcc81
SR
52/* ftrace_enabled is a method to turn ftrace on or off */
53int ftrace_enabled __read_mostly;
d61f82d0 54static int last_ftrace_enabled;
b0fc494f 55
60a7ecf4
SR
56/* Quick disabling of function tracer. */
57int function_trace_stop;
58
4eebcc81
SR
59/*
60 * ftrace_disabled is set when an anomaly is discovered.
61 * ftrace_disabled is much stronger than ftrace_enabled.
62 */
63static int ftrace_disabled __read_mostly;
64
52baf119 65static DEFINE_MUTEX(ftrace_lock);
b0fc494f 66
16444a8a
ACM
67static struct ftrace_ops ftrace_list_end __read_mostly =
68{
69 .func = ftrace_stub,
70};
71
72static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 74ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 75ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 76
/*
 * Chain callback: walk every registered ftrace_ops and pass the
 * traced (ip, parent_ip) pair to each one.  Installed as the trace
 * function whenever more than one ops is registered.
 */
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
91
/*
 * Trace function used when tracing is restricted to one task:
 * only forward to the saved real trace function if the current
 * task is flagged for tracing.
 */
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}
99
/* Save @func as the function ftrace_pid_func() forwards to. */
static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
106
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between the time this is called
 * and the time the stub is no longer invoked on some CPUs.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
119
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	/* quick kill switch checked on every traced call */
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
133
/*
 * Insert @ops at the head of ftrace_list and, if tracing is enabled,
 * repoint the active trace function.  NOTE(review): callers appear to
 * hold ftrace_lock — confirm against the register/unregister paths.
 * Always returns 0.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		/* single ops: call it directly; otherwise walk the chain */
		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		/* pid filtering interposes ftrace_pid_func */
		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
173
/*
 * Remove @ops from ftrace_list and re-select the trace function.
 * Returns 0 on success, -1 if @ops was not found on the list.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	/* unlink */
	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}
216
/*
 * Re-derive the active trace function after the traced-pid setting
 * changed: interpose ftrace_pid_func when a pid is set, or strip it
 * back off when cleared.  No-op while tracing is stubbed out.
 */
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		/* pid filter removed: restore the saved real function */
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}
240
73d3fd96
IM
241/* set when tracing only a pid */
242struct pid *ftrace_pid_trace;
243static struct pid * const ftrace_swapper_pid = &init_struct_pid;
244
3d083395 245#ifdef CONFIG_DYNAMIC_FTRACE
73d3fd96 246
99ecdc43 247#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 248# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
249#endif
250
8fc0c701
SR
251static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
252
b6887d79 253struct ftrace_func_probe {
8fc0c701 254 struct hlist_node node;
b6887d79 255 struct ftrace_probe_ops *ops;
8fc0c701
SR
256 unsigned long flags;
257 unsigned long ip;
258 void *data;
259 struct rcu_head rcu;
260};
261
262
d61f82d0
SR
263enum {
264 FTRACE_ENABLE_CALLS = (1 << 0),
265 FTRACE_DISABLE_CALLS = (1 << 1),
266 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
267 FTRACE_ENABLE_MCOUNT = (1 << 3),
268 FTRACE_DISABLE_MCOUNT = (1 << 4),
5a45cfe1
SR
269 FTRACE_START_FUNC_RET = (1 << 5),
270 FTRACE_STOP_FUNC_RET = (1 << 6),
d61f82d0
SR
271};
272
5072c59f
SR
273static int ftrace_filtered;
274
08f5ac90 275static LIST_HEAD(ftrace_new_addrs);
3d083395 276
41c52c0d 277static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 278
3c1720f0
SR
279struct ftrace_page {
280 struct ftrace_page *next;
431aa3fb 281 int index;
3c1720f0 282 struct dyn_ftrace records[];
aa5e5cea 283};
3c1720f0
SR
284
285#define ENTRIES_PER_PAGE \
286 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
287
288/* estimate from running different kernels */
289#define NR_TO_INIT 10000
290
291static struct ftrace_page *ftrace_pages_start;
292static struct ftrace_page *ftrace_pages;
293
37ad5084
SR
294static struct dyn_ftrace *ftrace_free_records;
295
265c831c
SR
296/*
297 * This is a double for. Do not use 'break' to break out of the loop,
298 * you must use a goto.
299 */
300#define do_for_each_ftrace_rec(pg, rec) \
301 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
302 int _____i; \
303 for (_____i = 0; _____i < pg->index; _____i++) { \
304 rec = &pg->records[_____i];
305
306#define while_for_each_ftrace_rec() \
307 } \
308 }
ecea656d
AS
309
#ifdef CONFIG_KPROBES

/* number of records currently frozen (kprobe'd mcount sites) */
static int frozen_record_count;

/* Mark @rec frozen so code patching skips its mcount site. */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

/* Clear the frozen state set by freeze_record(). */
static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

/* Non-zero if @rec is currently frozen. */
static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
/* without kprobes nothing can freeze a record */
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
339
/*
 * Return @rec to the free list.  The record's ip field is reused as
 * the free-list link, so a freed record must never be dereferenced
 * as a live one (FTRACE_FL_FREE marks it).
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
346
fed1939c
SR
347void ftrace_release(void *start, unsigned long size)
348{
349 struct dyn_ftrace *rec;
350 struct ftrace_page *pg;
351 unsigned long s = (unsigned long)start;
352 unsigned long e = s + size;
fed1939c 353
00fd61ae 354 if (ftrace_disabled || !start)
fed1939c
SR
355 return;
356
52baf119 357 mutex_lock(&ftrace_lock);
265c831c 358 do_for_each_ftrace_rec(pg, rec) {
b00f0b6d
Z
359 if ((rec->ip >= s) && (rec->ip < e) &&
360 !(rec->flags & FTRACE_FL_FREE))
265c831c
SR
361 ftrace_free_rec(rec);
362 } while_for_each_ftrace_rec();
52baf119 363 mutex_unlock(&ftrace_lock);
fed1939c
SR
364}
365
/*
 * Allocate a dyn_ftrace record, preferring the free list and falling
 * back to the page pool (growing it one page at a time).  Returns
 * NULL on allocation failure or free-list corruption.  The @ip
 * argument is not stored here; the caller fills rec->ip.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* a free-list entry must carry FTRACE_FL_FREE */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		/* rec->ip doubles as the free-list link */
		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
398
/*
 * Record a newly discovered mcount call site at @ip and queue it on
 * ftrace_new_addrs for later conversion by ftrace_update_code().
 * Returns the new record, or NULL if disabled / out of memory.
 */
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
417
/*
 * Dump the MCOUNT_INSN_SIZE bytes at @p as colon-separated hex,
 * prefixed by @fmt — continuation of a preceding pr_info() line.
 */
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
427
/*
 * Report a code-modification failure at @ip.  @failed is the errno
 * from the arch patching routine: -EFAULT (could not read), -EINVAL
 * (unexpected instruction bytes — also dumped), -EPERM (could not
 * write), anything else is "unknown".  Each case warns once.
 */
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
454
3c1720f0 455
/*
 * Decide whether @rec's mcount site should be a call or a nop given
 * @enable and the record's FILTER/NOTRACE/ENABLED flags, update the
 * flags, and patch the site.  Returns 0 if nothing changed, otherwise
 * the result of the arch patching call.
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	/* finally make the site agree with the new ENABLED state */
	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
526
e309b41d 527static void ftrace_replace_code(int enable)
3c1720f0 528{
3c1720f0
SR
529 struct dyn_ftrace *rec;
530 struct ftrace_page *pg;
6a24a244 531 int failed;
3c1720f0 532
265c831c
SR
533 do_for_each_ftrace_rec(pg, rec) {
534 /*
fa9d13cf
Z
535 * Skip over free records, records that have
536 * failed and not converted.
265c831c
SR
537 */
538 if (rec->flags & FTRACE_FL_FREE ||
fa9d13cf
Z
539 rec->flags & FTRACE_FL_FAILED ||
540 rec->flags & FTRACE_FL_CONVERTED)
265c831c
SR
541 continue;
542
543 /* ignore updates to this record's mcount site */
544 if (get_kprobe((void *)rec->ip)) {
545 freeze_record(rec);
546 continue;
547 } else {
548 unfreeze_record(rec);
549 }
f22f9a89 550
265c831c 551 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 552 if (failed) {
265c831c
SR
553 rec->flags |= FTRACE_FL_FAILED;
554 if ((system_state == SYSTEM_BOOTING) ||
555 !core_kernel_text(rec->ip)) {
556 ftrace_free_rec(rec);
4377245a 557 } else {
265c831c 558 ftrace_bug(failed, rec->ip);
4377245a
SR
559 /* Stop processing */
560 return;
561 }
3c1720f0 562 }
265c831c 563 } while_for_each_ftrace_rec();
3c1720f0
SR
564}
565
/*
 * Convert @rec's mcount call into a nop (initial state for dynamic
 * ftrace).  On failure the record is flagged FAILED and reported.
 * Returns 1 on success, 0 on failure.
 */
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
582
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
600
/*
 * stop_machine() callback: apply the command bitmask (@data points
 * to it) while every other CPU is held, so code can be patched and
 * the trace function switched atomically system-wide.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
620
/*
 * Run __ftrace_modify_code(@command) under stop_machine(), bracketed
 * by the arch prepare/post hooks.  Bails out (and warns) if the
 * prepare hook fails.
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
635
/* trace function last installed; used to detect a needed switch */
static ftrace_func_t saved_ftrace_func;
/* nesting count of ftrace users that requested startup */
static int ftrace_start_up;

/*
 * Add FTRACE_UPDATE_TRACE_FUNC to @command if the trace function
 * changed since the last update, then run the code modification —
 * unless nothing is to be done or ftrace is globally off.
 */
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
d61f82d0 651
/* Bump the user count and enable call sites (plus @command extras). */
static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}
662
/*
 * Drop one user; when the count reaches zero, disable all call
 * sites.  Also refreshes the trace function if it changed.
 */
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
682
/*
 * sysctl handler path: re-enable mcount processing, and re-enable
 * call sites if there are active users (ftrace_start_up != 0).
 */
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}
698
/*
 * sysctl handler path: disable mcount processing, and disable the
 * call sites too if ftrace is currently running.
 */
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}
712
3d083395
SR
713static cycle_t ftrace_update_time;
714static unsigned long ftrace_update_cnt;
715unsigned long ftrace_update_tot_cnt;
716
/*
 * Convert every record queued on ftrace_new_addrs: patch each mcount
 * call to a nop and mark it CONVERTED, freeing records that fail.
 * Accounts conversion time/counts in the ftrace_update_* globals.
 * Returns 0, or -1 if ftrace got disabled mid-walk.
 */
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
747
/*
 * Boot-time allocation of the dyn_ftrace page pool, sized for
 * @num_to_init records.  Partial allocation is tolerated — pages
 * can still be added lazily later.  Returns 0, or -1 if even the
 * first page could not be allocated.
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
791
5072c59f
SR
792enum {
793 FTRACE_ITER_FILTER = (1 << 0),
794 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 795 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 796 FTRACE_ITER_FAILURES = (1 << 3),
0c75a3ed 797 FTRACE_ITER_PRINTALL = (1 << 4),
8fc0c701 798 FTRACE_ITER_HASH = (1 << 5),
5072c59f
SR
799};
800
801#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
802
803struct ftrace_iterator {
5072c59f 804 struct ftrace_page *pg;
8fc0c701 805 int hidx;
431aa3fb 806 int idx;
5072c59f
SR
807 unsigned flags;
808 unsigned char buffer[FTRACE_BUFF_MAX+1];
809 unsigned buffer_idx;
810 unsigned filtered;
811};
812
8fc0c701
SR
/*
 * seq_file .next for the function-probe hash: advance @v to the next
 * hlist node, walking across hash buckets as each one is exhausted.
 * Returns NULL when all buckets are done.
 */
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	/* first entry of a fresh bucket, or the successor within one */
	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}
848
/* Switch the iterator into hash mode and return its first entry. */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}
858
/*
 * seq_file .show for a probe entry: either delegate to the probe's
 * own print callback, or emit "<site>:<handler>[:<data>]".
 */
static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
882
/*
 * seq_file .next over the dyn_ftrace record pages, filtering entries
 * according to iter->flags (FAILURES / FILTER / NOTRACE views).
 * Hash-mode iteration is delegated to t_hash_next().
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	/* "all functions enabled" view prints a single line only */
	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* skip records the current view should not display */
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
928
/*
 * seq_file .start: takes ftrace_lock (released in t_stop), handles
 * the "no filter set" shortcut, then repositions to *pos.  When the
 * record walk is exhausted it falls through to the probe hash.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/* rewind one step so t_next() lands on the current record */
	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}
965
/* seq_file .stop: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
970
/*
 * seq_file .show: print one record's symbol name, the "all functions
 * enabled" banner, or delegate to t_hash_show() in hash mode.
 */
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
994
995static struct seq_operations show_ftrace_seq_ops = {
996 .start = t_start,
997 .next = t_next,
998 .stop = t_stop,
999 .show = t_show,
1000};
1001
/*
 * open() for the "available_filter_functions" file: allocate an
 * iterator over all record pages and attach it as seq_file private
 * data.  Returns -ENODEV when ftrace is disabled, -ENOMEM, or the
 * seq_open() result.
 */
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		/* seq_open failed: don't leak the iterator */
		kfree(iter);
	}

	return ret;
}
1028
/* release() counterpart: tear down the seq_file and free the iterator. */
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
1039
/*
 * open() for "failures": reuse ftrace_avail_open() and flip the
 * iterator into FAILURES mode so only failed records are shown.
 */
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
1056
1057
/*
 * Clear the FILTER (@enable != 0) or NOTRACE (@enable == 0) flag
 * from every non-failed record; clearing the filter also drops
 * the global ftrace_filtered state.
 */
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
1074
/*
 * Shared open() for set_ftrace_filter (@enable=1) and
 * set_ftrace_notrace (@enable=0).  Write-without-append resets the
 * corresponding flag on all records; read mode sets up seq_file
 * iteration, write-only mode stashes the iterator directly in
 * file->private_data.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			/* seq_open failed: free the iterator */
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
1110
/* open() for set_ftrace_filter — regex open in "filter" mode. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

/* open() for set_ftrace_notrace — regex open in "notrace" mode. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1122
/*
 * llseek for the regex files: defer to seq_lseek() when readable,
 * otherwise pin the position to 1 (write-only handles don't seek).
 */
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
1135
1136enum {
1137 MATCH_FULL,
1138 MATCH_FRONT_ONLY,
1139 MATCH_MIDDLE_ONLY,
1140 MATCH_END_ONLY,
1141};
1142
/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' negates the match and is stripped */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match against the tail */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/* '*tail*' → middle; 'head*' → front */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
1189
64e7c440 1190static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 1191{
9f4801e3
SR
1192 int matched = 0;
1193 char *ptr;
1194
9f4801e3
SR
1195 switch (type) {
1196 case MATCH_FULL:
1197 if (strcmp(str, regex) == 0)
1198 matched = 1;
1199 break;
1200 case MATCH_FRONT_ONLY:
1201 if (strncmp(str, regex, len) == 0)
1202 matched = 1;
1203 break;
1204 case MATCH_MIDDLE_ONLY:
1205 if (strstr(str, regex))
1206 matched = 1;
1207 break;
1208 case MATCH_END_ONLY:
1209 ptr = strstr(str, regex);
1210 if (ptr && (ptr[len] == 0))
1211 matched = 1;
1212 break;
1213 }
1214
1215 return matched;
1216}
1217
64e7c440
SR
1218static int
1219ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1220{
1221 char str[KSYM_SYMBOL_LEN];
1222
1223 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1224 return ftrace_match(str, regex, len, type);
1225}
1226
9f4801e3
SR
1227static void ftrace_match_records(char *buff, int len, int enable)
1228{
6a24a244 1229 unsigned int search_len;
9f4801e3
SR
1230 struct ftrace_page *pg;
1231 struct dyn_ftrace *rec;
6a24a244
SR
1232 unsigned long flag;
1233 char *search;
9f4801e3 1234 int type;
9f4801e3
SR
1235 int not;
1236
6a24a244 1237 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
9f4801e3
SR
1238 type = ftrace_setup_glob(buff, len, &search, &not);
1239
1240 search_len = strlen(search);
1241
52baf119 1242 mutex_lock(&ftrace_lock);
265c831c 1243 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1244
1245 if (rec->flags & FTRACE_FL_FAILED)
1246 continue;
9f4801e3
SR
1247
1248 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1249 if (not)
1250 rec->flags &= ~flag;
1251 else
1252 rec->flags |= flag;
1253 }
e68746a2
SR
1254 /*
1255 * Only enable filtering if we have a function that
1256 * is filtered on.
1257 */
1258 if (enable && (rec->flags & FTRACE_FL_FILTER))
1259 ftrace_filtered = 1;
265c831c 1260 } while_for_each_ftrace_rec();
52baf119 1261 mutex_unlock(&ftrace_lock);
5072c59f
SR
1262}
1263
64e7c440
SR
1264static int
1265ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1266 char *regex, int len, int type)
1267{
1268 char str[KSYM_SYMBOL_LEN];
1269 char *modname;
1270
1271 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1272
1273 if (!modname || strcmp(modname, mod))
1274 return 0;
1275
1276 /* blank search means to match all funcs in the mod */
1277 if (len)
1278 return ftrace_match(str, regex, len, type);
1279 else
1280 return 1;
1281}
1282
1283static void ftrace_match_module_records(char *buff, char *mod, int enable)
1284{
6a24a244 1285 unsigned search_len = 0;
64e7c440
SR
1286 struct ftrace_page *pg;
1287 struct dyn_ftrace *rec;
1288 int type = MATCH_FULL;
6a24a244
SR
1289 char *search = buff;
1290 unsigned long flag;
64e7c440
SR
1291 int not = 0;
1292
6a24a244
SR
1293 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1294
64e7c440
SR
1295 /* blank or '*' mean the same */
1296 if (strcmp(buff, "*") == 0)
1297 buff[0] = 0;
1298
1299 /* handle the case of 'dont filter this module' */
1300 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1301 buff[0] = 0;
1302 not = 1;
1303 }
1304
1305 if (strlen(buff)) {
1306 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1307 search_len = strlen(search);
1308 }
1309
52baf119 1310 mutex_lock(&ftrace_lock);
64e7c440
SR
1311 do_for_each_ftrace_rec(pg, rec) {
1312
1313 if (rec->flags & FTRACE_FL_FAILED)
1314 continue;
1315
1316 if (ftrace_match_module_record(rec, mod,
1317 search, search_len, type)) {
1318 if (not)
1319 rec->flags &= ~flag;
1320 else
1321 rec->flags |= flag;
1322 }
e68746a2
SR
1323 if (enable && (rec->flags & FTRACE_FL_FILTER))
1324 ftrace_filtered = 1;
64e7c440
SR
1325
1326 } while_for_each_ftrace_rec();
52baf119 1327 mutex_unlock(&ftrace_lock);
64e7c440
SR
1328}
1329
f6180773
SR
1330/*
1331 * We register the module command as a template to show others how
1332 * to register the a command as well.
1333 */
1334
1335static int
1336ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1337{
1338 char *mod;
1339
1340 /*
1341 * cmd == 'mod' because we only registered this func
1342 * for the 'mod' ftrace_func_command.
1343 * But if you register one func with multiple commands,
1344 * you can tell which command was used by the cmd
1345 * parameter.
1346 */
1347
1348 /* we must have a module name */
1349 if (!param)
1350 return -EINVAL;
1351
1352 mod = strsep(&param, ":");
1353 if (!strlen(mod))
1354 return -EINVAL;
1355
1356 ftrace_match_module_records(func, mod, enable);
1357 return 0;
1358}
1359
1360static struct ftrace_func_command ftrace_mod_cmd = {
1361 .name = "mod",
1362 .func = ftrace_mod_callback,
1363};
1364
1365static int __init ftrace_mod_cmd_init(void)
1366{
1367 return register_ftrace_command(&ftrace_mod_cmd);
1368}
1369device_initcall(ftrace_mod_cmd_init);
1370
59df055f 1371static void
b6887d79 1372function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
59df055f 1373{
b6887d79 1374 struct ftrace_func_probe *entry;
59df055f
SR
1375 struct hlist_head *hhd;
1376 struct hlist_node *n;
1377 unsigned long key;
1378 int resched;
1379
1380 key = hash_long(ip, FTRACE_HASH_BITS);
1381
1382 hhd = &ftrace_func_hash[key];
1383
1384 if (hlist_empty(hhd))
1385 return;
1386
1387 /*
1388 * Disable preemption for these calls to prevent a RCU grace
1389 * period. This syncs the hash iteration and freeing of items
1390 * on the hash. rcu_read_lock is too dangerous here.
1391 */
1392 resched = ftrace_preempt_disable();
1393 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1394 if (entry->ip == ip)
1395 entry->ops->func(ip, parent_ip, &entry->data);
1396 }
1397 ftrace_preempt_enable(resched);
1398}
1399
b6887d79 1400static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 1401{
b6887d79 1402 .func = function_trace_probe_call,
59df055f
SR
1403};
1404
/* non-zero while trace_probe_ops is registered with ftrace */
static int ftrace_probe_registered;
59df055f 1406
b6887d79 1407static void __enable_ftrace_function_probe(void)
59df055f
SR
1408{
1409 int i;
1410
b6887d79 1411 if (ftrace_probe_registered)
59df055f
SR
1412 return;
1413
1414 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1415 struct hlist_head *hhd = &ftrace_func_hash[i];
1416 if (hhd->first)
1417 break;
1418 }
1419 /* Nothing registered? */
1420 if (i == FTRACE_FUNC_HASHSIZE)
1421 return;
1422
b6887d79 1423 __register_ftrace_function(&trace_probe_ops);
59df055f 1424 ftrace_startup(0);
b6887d79 1425 ftrace_probe_registered = 1;
59df055f
SR
1426}
1427
b6887d79 1428static void __disable_ftrace_function_probe(void)
59df055f
SR
1429{
1430 int i;
1431
b6887d79 1432 if (!ftrace_probe_registered)
59df055f
SR
1433 return;
1434
1435 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1436 struct hlist_head *hhd = &ftrace_func_hash[i];
1437 if (hhd->first)
1438 return;
1439 }
1440
1441 /* no more funcs left */
b6887d79 1442 __unregister_ftrace_function(&trace_probe_ops);
59df055f 1443 ftrace_shutdown(0);
b6887d79 1444 ftrace_probe_registered = 0;
59df055f
SR
1445}
1446
1447
1448static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1449{
b6887d79
SR
1450 struct ftrace_func_probe *entry =
1451 container_of(rhp, struct ftrace_func_probe, rcu);
59df055f
SR
1452
1453 if (entry->ops->free)
1454 entry->ops->free(&entry->data);
1455 kfree(entry);
1456}
1457
1458
1459int
b6887d79 1460register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
1461 void *data)
1462{
b6887d79 1463 struct ftrace_func_probe *entry;
59df055f
SR
1464 struct ftrace_page *pg;
1465 struct dyn_ftrace *rec;
59df055f 1466 int type, len, not;
6a24a244 1467 unsigned long key;
59df055f
SR
1468 int count = 0;
1469 char *search;
1470
1471 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1472 len = strlen(search);
1473
b6887d79 1474 /* we do not support '!' for function probes */
59df055f
SR
1475 if (WARN_ON(not))
1476 return -EINVAL;
1477
1478 mutex_lock(&ftrace_lock);
1479 do_for_each_ftrace_rec(pg, rec) {
1480
1481 if (rec->flags & FTRACE_FL_FAILED)
1482 continue;
1483
1484 if (!ftrace_match_record(rec, search, len, type))
1485 continue;
1486
1487 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1488 if (!entry) {
b6887d79 1489 /* If we did not process any, then return error */
59df055f
SR
1490 if (!count)
1491 count = -ENOMEM;
1492 goto out_unlock;
1493 }
1494
1495 count++;
1496
1497 entry->data = data;
1498
1499 /*
1500 * The caller might want to do something special
1501 * for each function we find. We call the callback
1502 * to give the caller an opportunity to do so.
1503 */
1504 if (ops->callback) {
1505 if (ops->callback(rec->ip, &entry->data) < 0) {
1506 /* caller does not like this func */
1507 kfree(entry);
1508 continue;
1509 }
1510 }
1511
1512 entry->ops = ops;
1513 entry->ip = rec->ip;
1514
1515 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1516 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1517
1518 } while_for_each_ftrace_rec();
b6887d79 1519 __enable_ftrace_function_probe();
59df055f
SR
1520
1521 out_unlock:
1522 mutex_unlock(&ftrace_lock);
1523
1524 return count;
1525}
1526
/* Selection flags for __unregister_ftrace_function_probe(). */
enum {
	PROBE_TEST_FUNC	= 1,	/* entry must belong to the given ops  */
	PROBE_TEST_DATA	= 2	/* entry must carry the given data     */
};
1531
1532static void
b6887d79 1533__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
1534 void *data, int flags)
1535{
b6887d79 1536 struct ftrace_func_probe *entry;
59df055f
SR
1537 struct hlist_node *n, *tmp;
1538 char str[KSYM_SYMBOL_LEN];
1539 int type = MATCH_FULL;
1540 int i, len = 0;
1541 char *search;
1542
1543 if (glob && (strcmp(glob, "*") || !strlen(glob)))
1544 glob = NULL;
1545 else {
1546 int not;
1547
1548 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1549 len = strlen(search);
1550
b6887d79 1551 /* we do not support '!' for function probes */
59df055f
SR
1552 if (WARN_ON(not))
1553 return;
1554 }
1555
1556 mutex_lock(&ftrace_lock);
1557 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1558 struct hlist_head *hhd = &ftrace_func_hash[i];
1559
1560 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1561
1562 /* break up if statements for readability */
b6887d79 1563 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
1564 continue;
1565
b6887d79 1566 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
1567 continue;
1568
1569 /* do this last, since it is the most expensive */
1570 if (glob) {
1571 kallsyms_lookup(entry->ip, NULL, NULL,
1572 NULL, str);
1573 if (!ftrace_match(str, glob, len, type))
1574 continue;
1575 }
1576
1577 hlist_del(&entry->node);
1578 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1579 }
1580 }
b6887d79 1581 __disable_ftrace_function_probe();
59df055f
SR
1582 mutex_unlock(&ftrace_lock);
1583}
1584
1585void
b6887d79 1586unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
1587 void *data)
1588{
b6887d79
SR
1589 __unregister_ftrace_function_probe(glob, ops, data,
1590 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
1591}
1592
1593void
b6887d79 1594unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 1595{
b6887d79 1596 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
1597}
1598
b6887d79 1599void unregister_ftrace_function_probe_all(char *glob)
59df055f 1600{
b6887d79 1601 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
1602}
1603
f6180773
SR
/* Registered ':cmd:' handlers for the filter files, and their lock. */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
1606
1607int register_ftrace_command(struct ftrace_func_command *cmd)
1608{
1609 struct ftrace_func_command *p;
1610 int ret = 0;
1611
1612 mutex_lock(&ftrace_cmd_mutex);
1613 list_for_each_entry(p, &ftrace_commands, list) {
1614 if (strcmp(cmd->name, p->name) == 0) {
1615 ret = -EBUSY;
1616 goto out_unlock;
1617 }
1618 }
1619 list_add(&cmd->list, &ftrace_commands);
1620 out_unlock:
1621 mutex_unlock(&ftrace_cmd_mutex);
1622
1623 return ret;
1624}
1625
1626int unregister_ftrace_command(struct ftrace_func_command *cmd)
1627{
1628 struct ftrace_func_command *p, *n;
1629 int ret = -ENODEV;
1630
1631 mutex_lock(&ftrace_cmd_mutex);
1632 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1633 if (strcmp(cmd->name, p->name) == 0) {
1634 ret = 0;
1635 list_del_init(&p->list);
1636 goto out_unlock;
1637 }
1638 }
1639 out_unlock:
1640 mutex_unlock(&ftrace_cmd_mutex);
1641
1642 return ret;
1643}
1644
64e7c440
SR
1645static int ftrace_process_regex(char *buff, int len, int enable)
1646{
f6180773 1647 char *func, *command, *next = buff;
6a24a244 1648 struct ftrace_func_command *p;
f6180773 1649 int ret = -EINVAL;
64e7c440
SR
1650
1651 func = strsep(&next, ":");
1652
1653 if (!next) {
1654 ftrace_match_records(func, len, enable);
1655 return 0;
1656 }
1657
f6180773 1658 /* command found */
64e7c440
SR
1659
1660 command = strsep(&next, ":");
1661
f6180773
SR
1662 mutex_lock(&ftrace_cmd_mutex);
1663 list_for_each_entry(p, &ftrace_commands, list) {
1664 if (strcmp(p->name, command) == 0) {
1665 ret = p->func(func, command, next, enable);
1666 goto out_unlock;
1667 }
64e7c440 1668 }
f6180773
SR
1669 out_unlock:
1670 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 1671
f6180773 1672 return ret;
64e7c440
SR
1673}
1674
e309b41d 1675static ssize_t
41c52c0d
SR
1676ftrace_regex_write(struct file *file, const char __user *ubuf,
1677 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1678{
1679 struct ftrace_iterator *iter;
1680 char ch;
1681 size_t read = 0;
1682 ssize_t ret;
1683
1684 if (!cnt || cnt < 0)
1685 return 0;
1686
41c52c0d 1687 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1688
1689 if (file->f_mode & FMODE_READ) {
1690 struct seq_file *m = file->private_data;
1691 iter = m->private;
1692 } else
1693 iter = file->private_data;
1694
1695 if (!*ppos) {
1696 iter->flags &= ~FTRACE_ITER_CONT;
1697 iter->buffer_idx = 0;
1698 }
1699
1700 ret = get_user(ch, ubuf++);
1701 if (ret)
1702 goto out;
1703 read++;
1704 cnt--;
1705
1706 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1707 /* skip white space */
1708 while (cnt && isspace(ch)) {
1709 ret = get_user(ch, ubuf++);
1710 if (ret)
1711 goto out;
1712 read++;
1713 cnt--;
1714 }
1715
5072c59f
SR
1716 if (isspace(ch)) {
1717 file->f_pos += read;
1718 ret = read;
1719 goto out;
1720 }
1721
1722 iter->buffer_idx = 0;
1723 }
1724
1725 while (cnt && !isspace(ch)) {
1726 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1727 iter->buffer[iter->buffer_idx++] = ch;
1728 else {
1729 ret = -EINVAL;
1730 goto out;
1731 }
1732 ret = get_user(ch, ubuf++);
1733 if (ret)
1734 goto out;
1735 read++;
1736 cnt--;
1737 }
1738
1739 if (isspace(ch)) {
1740 iter->filtered++;
1741 iter->buffer[iter->buffer_idx] = 0;
64e7c440
SR
1742 ret = ftrace_process_regex(iter->buffer,
1743 iter->buffer_idx, enable);
1744 if (ret)
1745 goto out;
5072c59f
SR
1746 iter->buffer_idx = 0;
1747 } else
1748 iter->flags |= FTRACE_ITER_CONT;
1749
1750
1751 file->f_pos += read;
1752
1753 ret = read;
1754 out:
41c52c0d 1755 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1756
1757 return ret;
1758}
1759
41c52c0d
SR
1760static ssize_t
1761ftrace_filter_write(struct file *file, const char __user *ubuf,
1762 size_t cnt, loff_t *ppos)
1763{
1764 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1765}
1766
1767static ssize_t
1768ftrace_notrace_write(struct file *file, const char __user *ubuf,
1769 size_t cnt, loff_t *ppos)
1770{
1771 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1772}
1773
1774static void
1775ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1776{
1777 if (unlikely(ftrace_disabled))
1778 return;
1779
1780 mutex_lock(&ftrace_regex_lock);
1781 if (reset)
1782 ftrace_filter_reset(enable);
1783 if (buf)
7f24b31b 1784 ftrace_match_records(buf, len, enable);
41c52c0d
SR
1785 mutex_unlock(&ftrace_regex_lock);
1786}
1787
77a2b37d
SR
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
4eebcc81 1801
41c52c0d
SR
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1816
e309b41d 1817static int
41c52c0d 1818ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1819{
1820 struct seq_file *m = (struct seq_file *)file->private_data;
1821 struct ftrace_iterator *iter;
1822
41c52c0d 1823 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1824 if (file->f_mode & FMODE_READ) {
1825 iter = m->private;
1826
1827 seq_release(inode, file);
1828 } else
1829 iter = file->private_data;
1830
1831 if (iter->buffer_idx) {
1832 iter->filtered++;
1833 iter->buffer[iter->buffer_idx] = 0;
7f24b31b 1834 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1835 }
1836
e6ea44e9 1837 mutex_lock(&ftrace_lock);
ee02a2e5 1838 if (ftrace_start_up && ftrace_enabled)
5072c59f 1839 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
e6ea44e9 1840 mutex_unlock(&ftrace_lock);
5072c59f
SR
1841
1842 kfree(iter);
41c52c0d 1843 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1844 return 0;
1845}
1846
41c52c0d
SR
/* release handler for set_ftrace_filter */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

/* release handler for set_ftrace_notrace */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1858
5e2336a0 1859static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
1860 .open = ftrace_avail_open,
1861 .read = seq_read,
1862 .llseek = seq_lseek,
1863 .release = ftrace_avail_release,
1864};
1865
5e2336a0 1866static const struct file_operations ftrace_failures_fops = {
eb9a7bf0
AS
1867 .open = ftrace_failures_open,
1868 .read = seq_read,
1869 .llseek = seq_lseek,
1870 .release = ftrace_avail_release,
1871};
1872
5e2336a0 1873static const struct file_operations ftrace_filter_fops = {
5072c59f 1874 .open = ftrace_filter_open,
850a80cf 1875 .read = seq_read,
5072c59f 1876 .write = ftrace_filter_write,
41c52c0d 1877 .llseek = ftrace_regex_lseek,
5072c59f
SR
1878 .release = ftrace_filter_release,
1879};
1880
5e2336a0 1881static const struct file_operations ftrace_notrace_fops = {
41c52c0d 1882 .open = ftrace_notrace_open,
850a80cf 1883 .read = seq_read,
41c52c0d
SR
1884 .write = ftrace_notrace_write,
1885 .llseek = ftrace_regex_lseek,
1886 .release = ftrace_notrace_release,
1887};
1888
ea4e2bc4
SR
1889#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1890
1891static DEFINE_MUTEX(graph_lock);
1892
1893int ftrace_graph_count;
1894unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1895
1896static void *
1897g_next(struct seq_file *m, void *v, loff_t *pos)
1898{
1899 unsigned long *array = m->private;
1900 int index = *pos;
1901
1902 (*pos)++;
1903
1904 if (index >= ftrace_graph_count)
1905 return NULL;
1906
1907 return &array[index];
1908}
1909
1910static void *g_start(struct seq_file *m, loff_t *pos)
1911{
1912 void *p = NULL;
1913
1914 mutex_lock(&graph_lock);
1915
f9349a8f
FW
1916 /* Nothing, tell g_show to print all functions are enabled */
1917 if (!ftrace_graph_count && !*pos)
1918 return (void *)1;
1919
ea4e2bc4
SR
1920 p = g_next(m, p, pos);
1921
1922 return p;
1923}
1924
1925static void g_stop(struct seq_file *m, void *p)
1926{
1927 mutex_unlock(&graph_lock);
1928}
1929
1930static int g_show(struct seq_file *m, void *v)
1931{
1932 unsigned long *ptr = v;
1933 char str[KSYM_SYMBOL_LEN];
1934
1935 if (!ptr)
1936 return 0;
1937
f9349a8f
FW
1938 if (ptr == (unsigned long *)1) {
1939 seq_printf(m, "#### all functions enabled ####\n");
1940 return 0;
1941 }
1942
ea4e2bc4
SR
1943 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1944
1945 seq_printf(m, "%s\n", str);
1946
1947 return 0;
1948}
1949
1950static struct seq_operations ftrace_graph_seq_ops = {
1951 .start = g_start,
1952 .next = g_next,
1953 .stop = g_stop,
1954 .show = g_show,
1955};
1956
1957static int
1958ftrace_graph_open(struct inode *inode, struct file *file)
1959{
1960 int ret = 0;
1961
1962 if (unlikely(ftrace_disabled))
1963 return -ENODEV;
1964
1965 mutex_lock(&graph_lock);
1966 if ((file->f_mode & FMODE_WRITE) &&
1967 !(file->f_flags & O_APPEND)) {
1968 ftrace_graph_count = 0;
1969 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1970 }
1971
1972 if (file->f_mode & FMODE_READ) {
1973 ret = seq_open(file, &ftrace_graph_seq_ops);
1974 if (!ret) {
1975 struct seq_file *m = file->private_data;
1976 m->private = ftrace_graph_funcs;
1977 }
1978 } else
1979 file->private_data = ftrace_graph_funcs;
1980 mutex_unlock(&graph_lock);
1981
1982 return ret;
1983}
1984
ea4e2bc4 1985static int
f9349a8f 1986ftrace_set_func(unsigned long *array, int *idx, char *buffer)
ea4e2bc4 1987{
ea4e2bc4
SR
1988 struct dyn_ftrace *rec;
1989 struct ftrace_page *pg;
f9349a8f 1990 int search_len;
ea4e2bc4 1991 int found = 0;
f9349a8f
FW
1992 int type, not;
1993 char *search;
1994 bool exists;
1995 int i;
ea4e2bc4
SR
1996
1997 if (ftrace_disabled)
1998 return -ENODEV;
1999
f9349a8f
FW
2000 /* decode regex */
2001 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2002 if (not)
2003 return -EINVAL;
2004
2005 search_len = strlen(search);
2006
52baf119 2007 mutex_lock(&ftrace_lock);
265c831c
SR
2008 do_for_each_ftrace_rec(pg, rec) {
2009
f9349a8f
FW
2010 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2011 break;
2012
265c831c
SR
2013 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2014 continue;
2015
f9349a8f
FW
2016 if (ftrace_match_record(rec, search, search_len, type)) {
2017 /* ensure it is not already in the array */
2018 exists = false;
2019 for (i = 0; i < *idx; i++)
2020 if (array[i] == rec->ip) {
2021 exists = true;
265c831c
SR
2022 break;
2023 }
f9349a8f
FW
2024 if (!exists) {
2025 array[(*idx)++] = rec->ip;
2026 found = 1;
2027 }
ea4e2bc4 2028 }
265c831c 2029 } while_for_each_ftrace_rec();
f9349a8f 2030
52baf119 2031 mutex_unlock(&ftrace_lock);
ea4e2bc4
SR
2032
2033 return found ? 0 : -EINVAL;
2034}
2035
2036static ssize_t
2037ftrace_graph_write(struct file *file, const char __user *ubuf,
2038 size_t cnt, loff_t *ppos)
2039{
2040 unsigned char buffer[FTRACE_BUFF_MAX+1];
2041 unsigned long *array;
2042 size_t read = 0;
2043 ssize_t ret;
2044 int index = 0;
2045 char ch;
2046
2047 if (!cnt || cnt < 0)
2048 return 0;
2049
2050 mutex_lock(&graph_lock);
2051
2052 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2053 ret = -EBUSY;
2054 goto out;
2055 }
2056
2057 if (file->f_mode & FMODE_READ) {
2058 struct seq_file *m = file->private_data;
2059 array = m->private;
2060 } else
2061 array = file->private_data;
2062
2063 ret = get_user(ch, ubuf++);
2064 if (ret)
2065 goto out;
2066 read++;
2067 cnt--;
2068
2069 /* skip white space */
2070 while (cnt && isspace(ch)) {
2071 ret = get_user(ch, ubuf++);
2072 if (ret)
2073 goto out;
2074 read++;
2075 cnt--;
2076 }
2077
2078 if (isspace(ch)) {
2079 *ppos += read;
2080 ret = read;
2081 goto out;
2082 }
2083
2084 while (cnt && !isspace(ch)) {
2085 if (index < FTRACE_BUFF_MAX)
2086 buffer[index++] = ch;
2087 else {
2088 ret = -EINVAL;
2089 goto out;
2090 }
2091 ret = get_user(ch, ubuf++);
2092 if (ret)
2093 goto out;
2094 read++;
2095 cnt--;
2096 }
2097 buffer[index] = 0;
2098
f9349a8f
FW
2099 /* we allow only one expression at a time */
2100 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
ea4e2bc4
SR
2101 if (ret)
2102 goto out;
2103
ea4e2bc4
SR
2104 file->f_pos += read;
2105
2106 ret = read;
2107 out:
2108 mutex_unlock(&graph_lock);
2109
2110 return ret;
2111}
2112
2113static const struct file_operations ftrace_graph_fops = {
2114 .open = ftrace_graph_open,
850a80cf 2115 .read = seq_read,
ea4e2bc4
SR
2116 .write = ftrace_graph_write,
2117};
2118#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2119
df4fc315 2120static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 2121{
5072c59f
SR
2122 struct dentry *entry;
2123
5072c59f
SR
2124 entry = debugfs_create_file("available_filter_functions", 0444,
2125 d_tracer, NULL, &ftrace_avail_fops);
2126 if (!entry)
2127 pr_warning("Could not create debugfs "
2128 "'available_filter_functions' entry\n");
2129
eb9a7bf0
AS
2130 entry = debugfs_create_file("failures", 0444,
2131 d_tracer, NULL, &ftrace_failures_fops);
2132 if (!entry)
2133 pr_warning("Could not create debugfs 'failures' entry\n");
2134
5072c59f
SR
2135 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2136 NULL, &ftrace_filter_fops);
2137 if (!entry)
2138 pr_warning("Could not create debugfs "
2139 "'set_ftrace_filter' entry\n");
41c52c0d
SR
2140
2141 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2142 NULL, &ftrace_notrace_fops);
2143 if (!entry)
2144 pr_warning("Could not create debugfs "
2145 "'set_ftrace_notrace' entry\n");
ad90c0e3 2146
ea4e2bc4
SR
2147#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2148 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
2149 NULL,
2150 &ftrace_graph_fops);
2151 if (!entry)
2152 pr_warning("Could not create debugfs "
2153 "'set_graph_function' entry\n");
2154#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2155
5072c59f
SR
2156 return 0;
2157}
2158
31e88909
SR
2159static int ftrace_convert_nops(struct module *mod,
2160 unsigned long *start,
68bf21aa
SR
2161 unsigned long *end)
2162{
2163 unsigned long *p;
2164 unsigned long addr;
2165 unsigned long flags;
2166
e6ea44e9 2167 mutex_lock(&ftrace_lock);
68bf21aa
SR
2168 p = start;
2169 while (p < end) {
2170 addr = ftrace_call_adjust(*p++);
20e5227e
SR
2171 /*
2172 * Some architecture linkers will pad between
2173 * the different mcount_loc sections of different
2174 * object files to satisfy alignments.
2175 * Skip any NULL pointers.
2176 */
2177 if (!addr)
2178 continue;
68bf21aa 2179 ftrace_record_ip(addr);
68bf21aa
SR
2180 }
2181
08f5ac90 2182 /* disable interrupts to prevent kstop machine */
68bf21aa 2183 local_irq_save(flags);
31e88909 2184 ftrace_update_code(mod);
68bf21aa 2185 local_irq_restore(flags);
e6ea44e9 2186 mutex_unlock(&ftrace_lock);
68bf21aa
SR
2187
2188 return 0;
2189}
2190
31e88909
SR
2191void ftrace_init_module(struct module *mod,
2192 unsigned long *start, unsigned long *end)
90d595fe 2193{
00fd61ae 2194 if (ftrace_disabled || start == end)
fed1939c 2195 return;
31e88909 2196 ftrace_convert_nops(mod, start, end);
90d595fe
SR
2197}
2198
68bf21aa
SR
2199extern unsigned long __start_mcount_loc[];
2200extern unsigned long __stop_mcount_loc[];
2201
2202void __init ftrace_init(void)
2203{
2204 unsigned long count, addr, flags;
2205 int ret;
2206
2207 /* Keep the ftrace pointer to the stub */
2208 addr = (unsigned long)ftrace_stub;
2209
2210 local_irq_save(flags);
2211 ftrace_dyn_arch_init(&addr);
2212 local_irq_restore(flags);
2213
2214 /* ftrace_dyn_arch_init places the return code in addr */
2215 if (addr)
2216 goto failed;
2217
2218 count = __stop_mcount_loc - __start_mcount_loc;
2219
2220 ret = ftrace_dyn_table_alloc(count);
2221 if (ret)
2222 goto failed;
2223
2224 last_ftrace_enabled = ftrace_enabled = 1;
2225
31e88909
SR
2226 ret = ftrace_convert_nops(NULL,
2227 __start_mcount_loc,
68bf21aa
SR
2228 __stop_mcount_loc);
2229
2230 return;
2231 failed:
2232 ftrace_disabled = 1;
2233}
68bf21aa 2234
3d083395 2235#else
0b6e4d56
FW
2236
2237static int __init ftrace_nodyn_init(void)
2238{
2239 ftrace_enabled = 1;
2240 return 0;
2241}
2242device_initcall(ftrace_nodyn_init);
2243
df4fc315
SR
/* !CONFIG_DYNAMIC_FTRACE stand-ins */
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
3d083395
SR
2251#endif /* CONFIG_DYNAMIC_FTRACE */
2252
df4fc315
SR
2253static ssize_t
2254ftrace_pid_read(struct file *file, char __user *ubuf,
2255 size_t cnt, loff_t *ppos)
2256{
2257 char buf[64];
2258 int r;
2259
e32d8956
SR
2260 if (ftrace_pid_trace == ftrace_swapper_pid)
2261 r = sprintf(buf, "swapper tasks\n");
2262 else if (ftrace_pid_trace)
978f3a45 2263 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
df4fc315
SR
2264 else
2265 r = sprintf(buf, "no pid\n");
2266
2267 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2268}
2269
e32d8956 2270static void clear_ftrace_swapper(void)
978f3a45
SR
2271{
2272 struct task_struct *p;
e32d8956 2273 int cpu;
978f3a45 2274
e32d8956
SR
2275 get_online_cpus();
2276 for_each_online_cpu(cpu) {
2277 p = idle_task(cpu);
978f3a45 2278 clear_tsk_trace_trace(p);
e32d8956
SR
2279 }
2280 put_online_cpus();
2281}
978f3a45 2282
e32d8956
SR
2283static void set_ftrace_swapper(void)
2284{
2285 struct task_struct *p;
2286 int cpu;
2287
2288 get_online_cpus();
2289 for_each_online_cpu(cpu) {
2290 p = idle_task(cpu);
2291 set_tsk_trace_trace(p);
2292 }
2293 put_online_cpus();
978f3a45
SR
2294}
2295
e32d8956
SR
2296static void clear_ftrace_pid(struct pid *pid)
2297{
2298 struct task_struct *p;
2299
229c4ef8 2300 rcu_read_lock();
e32d8956
SR
2301 do_each_pid_task(pid, PIDTYPE_PID, p) {
2302 clear_tsk_trace_trace(p);
2303 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
2304 rcu_read_unlock();
2305
e32d8956
SR
2306 put_pid(pid);
2307}
2308
2309static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
2310{
2311 struct task_struct *p;
2312
229c4ef8 2313 rcu_read_lock();
978f3a45
SR
2314 do_each_pid_task(pid, PIDTYPE_PID, p) {
2315 set_tsk_trace_trace(p);
2316 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 2317 rcu_read_unlock();
978f3a45
SR
2318}
2319
e32d8956
SR
2320static void clear_ftrace_pid_task(struct pid **pid)
2321{
2322 if (*pid == ftrace_swapper_pid)
2323 clear_ftrace_swapper();
2324 else
2325 clear_ftrace_pid(*pid);
2326
2327 *pid = NULL;
2328}
2329
2330static void set_ftrace_pid_task(struct pid *pid)
2331{
2332 if (pid == ftrace_swapper_pid)
2333 set_ftrace_swapper();
2334 else
2335 set_ftrace_pid(pid);
2336}
2337
df4fc315
SR
/*
 * Write handler for the debugfs "set_ftrace_pid" file.  Parses a
 * decimal value from user space and redirects function tracing:
 *
 *   val < 0  -> disable pid filtering
 *   val == 0 -> trace the swapper (idle) tasks (special cookie pid)
 *   val > 0  -> trace only the task with that pid
 *
 * Returns @cnt on success (including no-op writes) or a negative errno.
 */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* ftrace_lock serializes all updates of ftrace_pid_trace */
	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		/* drops the reference and NULLs ftrace_pid_trace */
		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			/* takes a reference; released on clear or below */
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				/* already tracing it: drop the extra ref */
				put_pid(pid);
				goto out;
			}
		}

		/* switch away from any previously traced pid first */
		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		/* find_get_pid() found no such pid: filtering is now off */
		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
2402
/* File operations for the debugfs "set_ftrace_pid" control file. */
static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
2407
2408static __init int ftrace_init_debugfs(void)
2409{
2410 struct dentry *d_tracer;
2411 struct dentry *entry;
2412
2413 d_tracer = tracing_init_dentry();
2414 if (!d_tracer)
2415 return 0;
2416
2417 ftrace_init_dyn_debugfs(d_tracer);
2418
2419 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2420 NULL, &ftrace_pid_fops);
2421 if (!entry)
2422 pr_warning("Could not create debugfs "
2423 "'set_ftrace_pid' entry\n");
2424 return 0;
2425}
df4fc315
SR
2426fs_initcall(ftrace_init_debugfs);
2427
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 *
 * NOTE(review): the sentence above refers the reader back to this very
 * function; it looks like stale text from when separate atomic and
 * non-atomic kill variants existed -- confirm the intended reference
 * (presumably the normal unregister path).
 */
void ftrace_kill(void)
{
	/* Hard-disable everything; no locking, so it is panic-safe. */
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
2441
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	/* an anomaly was detected earlier: refuse new registrations */
	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	/*
	 * NOTE(review): ftrace_startup() runs even when registration
	 * returned an error -- confirm that is intended.
	 */
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
2468
2469/**
32632920 2470 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
2471 * @ops - ops structure that holds the function to unregister
2472 *
2473 * Unregister a function that was added to be called by ftrace profiling.
2474 */
2475int unregister_ftrace_function(struct ftrace_ops *ops)
2476{
2477 int ret;
2478
e6ea44e9 2479 mutex_lock(&ftrace_lock);
3d083395 2480 ret = __unregister_ftrace_function(ops);
5a45cfe1 2481 ftrace_shutdown(0);
e6ea44e9 2482 mutex_unlock(&ftrace_lock);
b0fc494f
SR
2483
2484 return ret;
2485}
2486
/*
 * Handler for the "ftrace_enabled" sysctl: toggles function tracing
 * and swaps ftrace_trace_function between the real callback(s) and
 * the stub accordingly.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* nothing to do on read, on error, or when the value is unchanged */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/*
			 * A single registered ops is called directly;
			 * more than one goes through the list walker.
			 */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
f17845e5 2529
fb52607a 2530#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 2531
287b6e68 2532static atomic_t ftrace_graph_active;
4a2b8dda 2533static struct notifier_block ftrace_suspend_notifier;
e7d3737e 2534
e49dc19c
SR
/* Default (inactive) entry callback; always returns 0. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}
2539
287b6e68
FW
/* The callbacks that hook a function */
/* return-side hook; defaults to the do-nothing ftrace_stub */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
/* entry-side hook; defaults to the stub that just returns 0 */
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
2544
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
/*
 * Hands the pre-allocated stacks in @ret_stack_list[start..end) to tasks
 * that do not have one yet.  Returns 0 when the whole task list was
 * covered, -EAGAIN when the batch ran out before the list did (caller
 * should retry with a fresh batch), or -ENOMEM on allocation failure.
 * Leftover stacks in [start, end) are freed before returning.
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* free only what was allocated so far */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	/* tasklist_lock keeps the thread list stable while we walk it */
	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			/* batch exhausted: ask the caller to retry */
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* stacks not handed out are freed here */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
2590
/* Allocate a return stack for each task */
/*
 * Gives every existing task (including the per-cpu idle tasks) a
 * ret_stack, retrying in batches as long as alloc_retstack_tasklist()
 * reports -EAGAIN.  Returns 0 on success or a negative errno.
 */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	/* -EAGAIN means a batch ran out mid-walk: allocate and retry */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
2615
4a2b8dda
FW
2616/*
2617 * Hibernation protection.
2618 * The state of the current task is too much unstable during
2619 * suspend/restore to disk. We want to protect against that.
2620 */
2621static int
2622ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2623 void *unused)
2624{
2625 switch (state) {
2626 case PM_HIBERNATION_PREPARE:
2627 pause_graph_tracing();
2628 break;
2629
2630 case PM_POST_HIBERNATION:
2631 unpause_graph_tracing();
2632 break;
2633 }
2634 return NOTIFY_DONE;
2635}
2636
287b6e68
FW
2637int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2638 trace_func_graph_ent_t entryfunc)
15e6cb36 2639{
e7d3737e
FW
2640 int ret = 0;
2641
e6ea44e9 2642 mutex_lock(&ftrace_lock);
e7d3737e 2643
4a2b8dda
FW
2644 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2645 register_pm_notifier(&ftrace_suspend_notifier);
2646
287b6e68 2647 atomic_inc(&ftrace_graph_active);
fb52607a 2648 ret = start_graph_tracing();
f201ae23 2649 if (ret) {
287b6e68 2650 atomic_dec(&ftrace_graph_active);
f201ae23
FW
2651 goto out;
2652 }
e53a6319 2653
287b6e68
FW
2654 ftrace_graph_return = retfunc;
2655 ftrace_graph_entry = entryfunc;
e53a6319 2656
5a45cfe1 2657 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
2658
2659out:
e6ea44e9 2660 mutex_unlock(&ftrace_lock);
e7d3737e 2661 return ret;
15e6cb36
FW
2662}
2663
/*
 * Tear down function graph tracing: restore the stub callbacks,
 * disarm the call sites and drop the PM notifier.
 *
 * NOTE(review): ftrace_graph_active is decremented unconditionally;
 * an unbalanced call would drive it negative -- confirm callers are
 * always paired with register_ftrace_graph().
 */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	/* point the hooks back at the stubs before shutting down */
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}
f201ae23
FW
2676
2677/* Allocate a return stack for newly created task */
fb52607a 2678void ftrace_graph_init_task(struct task_struct *t)
f201ae23 2679{
287b6e68 2680 if (atomic_read(&ftrace_graph_active)) {
f201ae23
FW
2681 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2682 * sizeof(struct ftrace_ret_stack),
2683 GFP_KERNEL);
2684 if (!t->ret_stack)
2685 return;
2686 t->curr_ret_stack = -1;
380c4b14 2687 atomic_set(&t->tracing_graph_pause, 0);
f201ae23
FW
2688 atomic_set(&t->trace_overrun, 0);
2689 } else
2690 t->ret_stack = NULL;
2691}
2692
/* Free a task's return stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
14a866c5
SR
2703
/* Hard-stop tracing; thin wrapper around ftrace_stop(). */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
15e6cb36
FW
2708#endif
2709