/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between the time this is called
 * and the time the traced functions stop being called.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
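/*
 * Worked example (illustrative only; the sizes are arch and config
 * dependent): on a 64-bit box with 4K pages, a 16-byte struct
 * ftrace_page header and a 32-byte struct dyn_ftrace give
 * (4096 - 16) / 32 = 127 records per page.  Only the formula above
 * is authoritative.
 */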
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else {
					ftrace_bug(failed, rec->ip);
					/* Stop processing */
					return;
				}
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
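/*
 * Usage sketch (illustrative only, not part of the original file): an
 * in-kernel user could narrow tracing before enabling it.  Note that
 * ftrace_match() may write a NUL into the buffer while parsing '*', so
 * the pattern must live in writable storage, not in a string literal.
 */
#if 0
static void __init example_restrict_tracing(void)
{
	unsigned char filter[] = "sched_*";	/* modified in place */
	unsigned char skip[] = "*spin*";	/* modified in place */

	/* trace only scheduler-related functions ... */
	ftrace_set_filter(filter, sizeof(filter) - 1, 1);
	/* ... but never the spinlock helpers */
	ftrace_set_notrace(skip, sizeof(skip) - 1, 1);
}
#endif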
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
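/*
 * The files created above live under the tracing debugfs directory
 * (typically /sys/kernel/debug/tracing or /debug/tracing).  Typical
 * use from user space, for illustration:
 *
 *	cat available_filter_functions
 *	echo 'sched_*' > set_ftrace_filter
 *	echo '*spin*'  > set_ftrace_notrace
 *	echo schedule  > set_graph_function
 */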
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. It is safe to call from atomic
 * context; no locks are taken.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */