/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

static bool ftrace_pids_enabled(void)
{
	return !list_empty(&ftrace_pids);
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
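/*
 * Illustrative sketch (not part of the original file): the pair above is
 * used like a for-each loop over the RCU-protected ops list. Callers keep
 * preemption disabled so ops freed after a synchronize_sched() cannot
 * vanish mid-walk:
 *
 *	struct ftrace_ops *op;
 *
 *	preempt_disable_notrace();
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			pr_info("ops func: %ps\n", op->func);
 *	} while_for_each_ftrace_op(op);
 *	preempt_enable_notrace();
 */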
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * every CPU stops calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic ops or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}
static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
		/* The control_ops needs the trampoline update */
		ops = &control_ops;
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
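/*
 * Illustrative sketch ("my_callback" and "my_ops" are hypothetical, not
 * part of this file): a minimal user of the registration path above. The
 * public register_ftrace_function() wrapper ends up here; RECURSION_SAFE
 * tells ftrace the callback handles its own recursion protection:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called for every traced function not filtered out
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */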
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}
static void ftrace_update_pid_func(void)
{
	bool enabled = ftrace_pids_enabled();
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = enabled ? ftrace_pid_func :
				op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
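/*
 * Worked example (illustrative, 64-bit with 4096-byte pages and
 * CONFIG_FUNCTION_GRAPH_TRACER enabled): sizeof(struct ftrace_profile) is
 * 16 (hlist_node) + 8 (ip) + 8 (counter) + 8 (time) + 8 (time_squared)
 * = 48 bytes, and the page header (next + index) takes 16 bytes, so
 * PROFILE_RECORDS_SIZE = 4096 - 16 = 4080 and
 * PROFILES_PER_PAGE = 4080 / 48 = 85 records per page.
 */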
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
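/*
 * Sanity-check sketch of the s^2 computation above (illustrative
 * userspace code, not part of this file):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long x[] = { 100, 200, 300 };	// ns samples
 *		unsigned long long n = 3, sum = 0, sum_sq = 0;
 *
 *		for (unsigned long long i = 0; i < n; i++) {
 *			sum += x[i];
 *			sum_sq += x[i] * x[i];
 *		}
 *		// s^2 = (n * sum(x^2) - (sum x)^2) / (n * (n - 1))
 *		unsigned long long s2 = (n * sum_sq - sum * sum) / (n * (n - 1));
 *		printf("%llu\n", s2);	// prints 10000 (ns^2) for these samples
 *		return 0;
 *	}
 */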
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
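/*
 * Illustrative userspace sketch of driving the file above (the path
 * assumes tracefs is mounted at /sys/kernel/debug/tracing; not part of
 * this file):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/function_profile_enabled",
 *			      O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "1", 1);	// lands in ftrace_profile_write()
 *		close(fd);
 *		return 0;
 *	}
 */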
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create tracefs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops *ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};
/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}
struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}
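/*
 * Illustrative sketch of how these helpers compose (hypothetical caller,
 * not part of this file; alloc_ftrace_hash() is defined further below):
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (!hash)
 *		return -ENOMEM;
 *	if (add_hash_entry(hash, rec->ip) < 0)	// kmalloc + __add_hash_entry
 *		goto free;
 *	if (ftrace_lookup_ip(hash, rec->ip))	// found in O(1) afterwards
 *		...;
 */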
static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}
static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}
static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}
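/*
 * Concrete illustration of the semantics above (illustrative, not part of
 * the original file):
 *
 *	filter_hash = {}	 notrace_hash = {}	-> every ip matches
 *	filter_hash = {foo}	 notrace_hash = {}	-> only foo matches
 *	filter_hash = {}	 notrace_hash = {bar}	-> all but bar match
 *	filter_hash = {foo,bar}	 notrace_hash = {bar}	-> only foo matches
 */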
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
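/*
 * Illustrative usage (mirrors callers later in this file): count enabled
 * records. Note the rule above: a goto, not a break, exits the double loop:
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *	int enabled = 0;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_ENABLED)
 *			enabled++;
 *		if (enabled > 100)
 *			goto out;	// 'break' would only exit the inner loop
 *	} while_for_each_ftrace_rec();
 * out:
 *	...
 */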
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}
/**
 * ftrace_location - return true if the ip giving is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
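/*
 * Illustrative sketch (hypothetical "addr"; kprobes performs a check of
 * this shape to decide whether an address sits on a patchable
 * mcount/fentry site):
 *
 *	unsigned long faddr = ftrace_location(addr);
 *
 *	if (faddr)
 *		// addr lies within the MCOUNT_INSN_SIZE bytes starting at
 *		// faddr, i.e. on an instruction that ftrace can patch
 */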
/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled iff
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {
		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {
		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}
static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops)
				pr_cont("\ttramp: %pS",
					(void *)ops->trampoline);
			else
				pr_cont("\ttramp: ERROR!");
		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont(" expected tramp: %lx\n", ip);
	}
}
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;

		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
	}

	return FTRACE_UPDATE_MAKE_NOP;
}
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}
/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
				   (void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};
/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
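/*
 * Illustrative arch-side sketch (hedged; the real loop lives in arch
 * code): an architecture that patches call sites itself, instead of using
 * ftrace_replace_code(), walks the records with this iterator:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		ftrace_test_record(rec, enable);
 *		// arch-specific patching of the site at rec->ip goes here
 *	}
 */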
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}
2501 * ftrace_run_stop_machine, go back to the stop machine method
2502 * @command: The command to tell ftrace what to do
2504 * If an arch needs to fall back to the stop machine method, the
2505 * it can call this function.
2507 void ftrace_run_stop_machine(int command
)
2509 stop_machine(__ftrace_modify_code
, &command
, NULL
);
2513 * arch_ftrace_update_code, modify the code to trace or not trace
2514 * @command: The command that needs to be done
2516 * Archs can override this function if it does not need to
2517 * run stop_machine() to modify code.
2519 void __weak
arch_ftrace_update_code(int command
)
2521 ftrace_run_stop_machine(command
);
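
/*
 * Illustrative only: a minimal sketch (not part of this file) of an
 * architecture overriding arch_ftrace_update_code() to avoid the
 * stop_machine() fallback. ftrace_arch_update_fast() is hypothetical;
 * x86, for instance, patches call sites live using breakpoints rather
 * than stopping the machine.
 */
#if 0
void arch_ftrace_update_code(int command)
{
	/* Fall back to the safe-but-slow path if the fast path fails */
	if (ftrace_arch_update_fast(command))	/* hypothetical */
		ftrace_run_stop_machine(command);
}
#endif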
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);
	ftrace_hash_rec_disable(ops, 1);

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags))
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		arch_ftrace_trampoline_free(ops);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}

static cycle_t		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 0;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return 0;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}

static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long update_cnt = 0;
	unsigned long ref = 0;
	bool test = false;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			int cnt = ref;

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			if (test)
				cnt += referenced_filters(p);
			p->flags = cnt;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && cnt) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};
static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if ((iter->flags & FTRACE_ITER_FILTER &&
	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
	    (iter->flags & FTRACE_ITER_NOTRACE &&
	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_hash_start(m, pos);

	return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	return NULL;
}

static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
{
	void *ptr;

	ptr = arch_ftrace_trampoline_func(ops, rec);
	if (ptr)
		seq_printf(m, " ->%pS", ptr);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		if (iter->flags & FTRACE_ITER_NOTRACE)
			seq_puts(m, "#### no functions disabled ####\n");
		else
			seq_puts(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED) {
		struct ftrace_ops *ops = NULL;

		seq_printf(m, " (%ld)%s%s",
			   ftrace_rec_count(rec),
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops)
				seq_printf(m, "\ttramp: %pS",
					   (void *)ops->trampoline);
			else
				seq_puts(m, "\ttramp: ERROR!");
		}
		add_trampoline_func(m, ops, rec);
	}

	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}

static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}

/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	ftrace_ops_init(ops);

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	iter->ops = ops;
	iter->flags = flag;

	mutex_lock(&ops->func_hash->regex_lock);

	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->func_hash->notrace_hash;
	else
		hash = ops->func_hash->filter_hash;

	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC)
			iter->hash = alloc_ftrace_hash(size_bits);
		else
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			ret = -ENOMEM;
			goto out_unlock;
		}
	}

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;

 out_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
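
/*
 * Illustrative only: a minimal sketch of how the match types above
 * behave. The glob forms in the comments are translated to these types
 * by filter_parse_regex() (defined elsewhere); len is the length of the
 * stripped search string.
 */
#if 0
static void ftrace_match_examples(void)
{
	/* "sys_open"  -> MATCH_FULL        (exact symbol name) */
	WARN_ON(!ftrace_match("sys_open", "sys_open", 8, MATCH_FULL));
	/* "sys_*"     -> MATCH_FRONT_ONLY  (prefix match)      */
	WARN_ON(!ftrace_match("sys_open", "sys_", 4, MATCH_FRONT_ONLY));
	/* "*open*"    -> MATCH_MIDDLE_ONLY (substring match)   */
	WARN_ON(!ftrace_match("do_sys_open", "open", 4, MATCH_MIDDLE_ONLY));
	/* "*_open"    -> MATCH_END_ONLY    (suffix match)      */
	WARN_ON(!ftrace_match("sys_open", "_open", 5, MATCH_END_ONLY));
}
#endif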
static int
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
{
	struct ftrace_func_entry *entry;
	int ret = 0;

	entry = ftrace_lookup_ip(hash, rec->ip);
	if (not) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;

		free_hash_entry(hash, entry);
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;

		ret = add_hash_entry(hash, rec->ip);
	}
	return ret;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}

	return ftrace_match(str, regex, len, type);
}

static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}

static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *param, int enable)
{
	char *mod;
	int ret = -EINVAL;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return ret;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return ret;

	ret = ftrace_match_module_records(hash, func, mod);
	if (!ret)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);
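
/*
 * Illustrative only: following the "mod" template above, a minimal sketch
 * of registering a custom ftrace command. my_cmd_callback and the "mycmd"
 * name are hypothetical; with this registered, writing
 * "some_func:mycmd:param" to set_ftrace_filter would invoke the callback.
 */
#if 0
static int my_cmd_callback(struct ftrace_hash *hash,
			   char *func, char *cmd, char *param, int enable)
{
	/* act on 'func' and 'param' here, updating 'hash' as needed */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_callback,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
#endif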
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(trace_probe_ops)
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
{
	int ret;
	int i;

	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
					       old_hash);
		return;
	}

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = ftrace_startup(&trace_probe_ops, 0);

	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}

static void ftrace_free_entry(struct ftrace_func_probe *entry)
{
	if (entry->ops->free)
		entry->ops->free(entry->ops, entry->ip, &entry->data);
	kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_func_probe *entry;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct ftrace_hash *hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;
	int ret;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	old_hash_ops.filter_hash = old_hash;
	/* Probes only have filters */
	old_hash_ops.notrace_hash = NULL;

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
	if (!hash) {
		count = -ENOMEM;
		goto out;
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
		goto out;
	}

	mutex_lock(&ftrace_lock);

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);

	__enable_ftrace_function_probe(&old_hash_ops);

	if (!ret)
		free_ftrace_hash_rcu(old_hash);
	else
		count = ret;

 out_unlock:
	mutex_unlock(&ftrace_lock);
 out:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);

	return count;
}
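
/*
 * Illustrative only: a minimal sketch of attaching a probe to every
 * function matching a glob. my_probe_func is hypothetical; the kernel's
 * real users of this interface are the set_ftrace_filter triggers such
 * as "traceon"/"traceoff" in trace_functions.c.
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* called on entry of every matched function */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
};

static int __init my_probe_init(void)
{
	int ret;

	/* returns the number of functions hooked, or a negative errno */
	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
	return ret < 0 ? ret : 0;
}
#endif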
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_entry *rec_entry;
	struct ftrace_func_probe *entry;
	struct ftrace_func_probe *p;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct list_head free_list;
	struct ftrace_hash *hash;
	struct hlist_node *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;
	int ret;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

	INIT_LIST_HEAD(&free_list);

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

			hlist_del_rcu(&entry->node);
			list_add(&entry->free_list, &free_list);
		}
	}
	mutex_lock(&ftrace_lock);
	__disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	synchronize_sched();
	if (!ret)
		free_ftrace_hash_rcu(old_hash);

	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
	}

	ret = read;
 out:
	return ret;
}

ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static int
ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
	struct ftrace_func_entry *entry;

	if (!ftrace_location(ip))
		return -EINVAL;

	if (remove) {
		entry = ftrace_lookup_ip(hash, ip);
		if (!entry)
			return -ENOENT;
		free_hash_entry(hash, entry);
		return 0;
	}

	return add_hash_entry(hash, ip);
}

static void ftrace_ops_update_code(struct ftrace_ops *ops,
				   struct ftrace_ops_hash *old_hash)
{
	struct ftrace_ops *op;

	if (!ftrace_enabled)
		return;

	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
		return;
	}

	/*
	 * If this is the shared global_ops filter, then we need to
	 * check if another ops that shares it is enabled.
	 * If so, we still need to run the modify code.
	 */
	if (ops->func_hash != &global_ops.local_hash)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->func_hash == &global_ops.local_hash &&
		    op->flags & FTRACE_OPS_FL_ENABLED) {
			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
			/* Only need to do this once */
			return;
		}
	} while_for_each_ftrace_op(op);
}

static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	old_hash = *orig_hash;
	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, &old_hash_ops);
		free_ftrace_hash_rcu(old_hash);
	}
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
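
/*
 * Illustrative only: a minimal sketch of tracing a single function by
 * address with the API above. my_tracer_func and the kallsyms lookup of
 * "vfs_read" are assumptions for the example, not part of this file.
 */
#if 0
static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called on every entry to the filtered function */
}

static struct ftrace_ops my_ops = {
	.func	= my_tracer_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_tracer_init(void)
{
	unsigned long ip = kallsyms_lookup_name("vfs_read");
	int ret;

	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);
	if (ret)
		return ret;
	return register_ftrace_function(&my_ops);
}
#endif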
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
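
/*
 * Illustrative only: a minimal sketch combining the two calls above to
 * trace every "vfs_*" function except vfs_write. my_ops is assumed to be
 * an ftrace_ops like the one sketched earlier; the globs follow the
 * set_ftrace_filter syntax.
 */
#if 0
static int __init my_filter_init(void)
{
	int ret;

	ret = ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
	if (ret)
		return ret;
	return ftrace_set_notrace(&my_ops, "vfs_write",
				  strlen("vfs_write"), 1);
}
#endif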
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);

/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
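
/*
 * Illustrative only: with the __setup() handlers above, filters can be
 * chosen from the kernel command line before any tracer starts, e.g.:
 *
 *   ftrace=function ftrace_filter=vfs_*,sys_nanosleep
 *   ftrace_notrace=*spin_lock*
 *
 * The buffers are applied later by set_ftrace_early_filters().
 */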
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);

static unsigned long save_global_trampoline;
static unsigned long save_global_flags;

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static void __init set_ftrace_early_graph(char *buf, int enable)
{
	int ret;
	char *func;
	unsigned long *table = ftrace_graph_funcs;
	int *count = &ftrace_graph_count;

	if (!enable) {
		table = ftrace_graph_notrace_funcs;
		count = &ftrace_graph_notrace_count;
	}

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	ftrace_ops_init(ops);

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->func_hash->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->func_hash->filter_hash;
		else
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
		old_hash = *orig_hash;
		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret) {
			ftrace_ops_update_code(iter->ops, &old_hash_ops);
			free_ftrace_hash_rcu(old_hash);
		}
		mutex_unlock(&ftrace_lock);
	}

	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	kfree(iter);

	return 0;
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
int ftrace_graph_notrace_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

struct ftrace_graph_data {
	unsigned long *table;
	int size;
	int *count;
	const struct seq_operations *seq_ops;
};

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	if (*pos >= *fgd->count)
		return NULL;
	return &fgd->table[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!*fgd->count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		struct ftrace_graph_data *fgd = m->private;

		if (fgd->table == ftrace_graph_funcs)
			seq_puts(m, "#### all functions enabled ####\n");
		else
			seq_puts(m, "#### no functions disabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}

static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret = 0;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		*fgd->count = 0;
		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, fgd->seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		}
	} else
		file->private_data = fgd;

	return ret;
}

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_notrace_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_notrace_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		kfree(m->private);
		seq_release(inode, file);
	} else {
		kfree(file->private_data);
	}

	return 0;
}
static int
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= size)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= size)
						goto out;
				}
			} else {
				if (exists) {
					array[i] = array[--(*idx)];
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret = 0;
	struct ftrace_graph_data *fgd = file->private_data;

	if (!cnt)
		return 0;

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		mutex_lock(&graph_lock);

		/* we allow only one expression at a time */
		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
				      parser.buffer);

		mutex_unlock(&graph_lock);
	}

	if (!ret)
		ret = read;

	trace_parser_put(&parser);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future, it may actually delete the files, this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}

static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			  d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", 0444,
			  d_tracer, NULL, &ftrace_enabled_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0444, d_tracer,
			  NULL,
			  &ftrace_graph_fops);
	trace_create_file("set_graph_notrace", 0444, d_tracer,
			  NULL,
			  &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
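
/*
 * Illustrative only: a typical shell session against the files created
 * above (paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *   # grep vfs_read available_filter_functions
 *   # echo 'vfs_*' > set_ftrace_filter
 *   # echo vfs_write > set_ftrace_notrace
 *   # cat enabled_functions
 */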
static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *ipa = a;
	unsigned long *ipb = b;
	unsigned long t;

	t = *ipa;
	*ipa = *ipb;
	*ipb = t;
}

static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, ftrace_swap_ips);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module need their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_MODULES

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);
}

static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}

void ftrace_module_init(struct module *mod)
{
	ftrace_init_module(mod, mod->ftrace_callsites,
			   mod->ftrace_callsites +
			   mod->num_ftrace_callsites);
}

static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_GOING)
		ftrace_release_mod(mod);

	return 0;
}
#else
static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_exit_nb = {
	.notifier_call = ftrace_module_notify_exit,
	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
};

void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_exit_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module exit notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
/* Do nothing if arch does not support this */
void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{

	/*
	 * Currently there's no safe way to free a trampoline when the kernel
	 * is configured with PREEMPT. That is because a task could be preempted
	 * when it jumped to the trampoline, it may be preempted for a long time
	 * depending on the system load, and currently there's no way to know
	 * when it will be off the trampoline. If the trampoline is freed
	 * too early, when the task runs again, it will be executing on freed
	 * memory and crash.
	 */
#ifdef CONFIG_PREEMPT
	/* Currently, only non dynamic ops can have a trampoline */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		return;
#endif

	arch_ftrace_update_trampoline(ops);
}

#else

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
core_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})

# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	return 1;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

#endif /* CONFIG_DYNAMIC_FTRACE */

__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}
5119 ftrace_ops_control_func(unsigned long ip
, unsigned long parent_ip
,
5120 struct ftrace_ops
*op
, struct pt_regs
*regs
)
5122 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT
)))
5126 * Some of the ops may be dynamically allocated,
5127 * they must be freed after a synchronize_sched().
5129 preempt_disable_notrace();
5130 trace_recursion_set(TRACE_CONTROL_BIT
);
5133 * Control funcs (perf) uses RCU. Only trace if
5134 * RCU is currently active.
5136 if (!rcu_is_watching())
5139 do_for_each_ftrace_op(op
, ftrace_control_list
) {
5140 if (!(op
->flags
& FTRACE_OPS_FL_STUB
) &&
5141 !ftrace_function_local_disabled(op
) &&
5142 ftrace_ops_test(op
, ip
, regs
))
5143 op
->func(ip
, parent_ip
, op
, regs
);
5144 } while_for_each_ftrace_op(op
);
5146 trace_recursion_clear(TRACE_CONTROL_BIT
);
5147 preempt_enable_notrace();
5150 static struct ftrace_ops control_ops
= {
5151 .func
= ftrace_ops_control_func
,
5152 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
5153 INIT_OPS_HASH(control_ops
)
static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *ignored, struct pt_regs *regs)
{
	struct ftrace_ops *op;
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (ftrace_ops_test(op, ip, regs)) {
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
				goto out;
			}
			op->func(ip, parent_ip, op, regs);
		}
	} while_for_each_ftrace_op(op);
out:
	preempt_enable_notrace();
	trace_clear_recursion(bit);
}
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If callbacks want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set ARCH_SUPPORTS_FTRACE_OPS.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif
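
/*
 * Minimal callback sketch (hypothetical name, not part of the original
 * file): per the comment above, a handler that wants @regs must
 * tolerate a NULL pointer unless CONFIG_DYNAMIC_FTRACE_WITH_REGS is
 * guaranteed by the arch.
 */
#if 0
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* the arch may have passed no (or only partial) regs */
	if (!regs)
		return;

	/* safe to inspect *regs here */
}
#endif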
/*
 * If there's only one function registered but it does not support
 * recursion, this function will be called by the mcount trampoline.
 * This function will handle recursion protection.
 */
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	op->func(ip, parent_ip, op, regs);

	trace_clear_recursion(bit);
}
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If the func handles its own recursion, call it directly.
	 * Otherwise call the recursion protected function that
	 * will call the ftrace ops function.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;

	return ops->func;
}
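
/*
 * Effect in practice, as a sketch with a hypothetical ops (not part of
 * the original file): an ops registered without
 * FTRACE_OPS_FL_RECURSION_SAFE is not called directly by the
 * trampoline; ftrace_ops_recurs_func() is interposed and performs the
 * recursion check before invoking the real handler.
 */
#if 0
static struct ftrace_ops my_ops = {
	.func = my_unsafe_func,	/* no FTRACE_OPS_FL_RECURSION_SAFE */
};
/* here ftrace_ops_get_func(&my_ops) returns ftrace_ops_recurs_func */
#endif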
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(pid);
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();

	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (!ftrace_pids_enabled() && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);

	if (v == (void *)1) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	if (fpid->pid == ftrace_swapper_pid)
		seq_puts(m, "swapper tasks\n");
	else
		seq_printf(m, "%u\n", pid_vnr(fpid->pid));

	return 0;
}
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;

	ret = kstrtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}
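
/*
 * Usage examples for the set_ftrace_pid file (paths assume tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *	echo 1234 > set_ftrace_pid	- add PID 1234 to the filter
 *	echo 0 > set_ftrace_pid	- trace only the idle (swapper) tasks
 *	echo > set_ftrace_pid		- clear the filter quietly
 */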
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};
static __init int ftrace_init_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	ftrace_init_dyn_tracefs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			  NULL, &ftrace_pid_fops);

	ftrace_profile_tracefs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_tracefs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. To remove a single ops cleanly from a
 * non-atomic section, use unregister_ftrace_function() instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
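
/*
 * Minimal usage sketch (hypothetical module code, not part of the
 * original file). Per the note above, the callback and everything it
 * calls must be notrace.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs on every traced function entry; keep this path short */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* module init: register_ftrace_function(&my_ops);   */
/* module exit: unregister_ftrace_function(&my_ops); */
#endif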
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
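
/*
 * This proc handler backs the kernel.ftrace_enabled sysctl, e.g.:
 *
 *	sysctl kernel.ftrace_enabled=0	- route all calls to ftrace_stub
 *	sysctl kernel.ftrace_enabled=1	- restore the registered ops
 */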
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID |
				  FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
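
/*
 * Worked example of the loop above: if next slept for 2ms, 2ms is
 * added to the calltime of every call still pending on its return
 * stack, so durations later computed as rettime - calltime exclude
 * the time spent sleeping.
 */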
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;

	return __ftrace_graph_entry(trace);
}
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it is the function
	 * traced by that ops.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
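
/*
 * Usage sketch (hypothetical callbacks, not part of the original
 * file): a graph tracer hooks both function entry and return with one
 * registration call.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero means: trace this function */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime gives the duration */
}

/* register_ftrace_graph(my_graph_return, my_graph_entry); */
#endif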
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */