/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
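/*
 * Illustration (not part of the original file): how the list above is
 * consumed. With two registered ops (the names "ops_a"/"ops_b" are
 * hypothetical), the list looks like:
 *
 *	ftrace_list -> ops_b -> ops_a -> &ftrace_list_end
 *
 * and ftrace_trace_function is set to ftrace_list_func, so every mcount
 * call site effectively does:
 *
 *	ops_b.func(ip, parent_ip);
 *	ops_a.func(ip, parent_ip);
 *
 * With a single registered ops the walk is skipped entirely and
 * ftrace_trace_function points straight at that ops->func.
 */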
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the last
 * invocation of the previously registered function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;

	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
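/*
 * Illustration (not part of the original file): the smp_wmb() above pairs
 * with the read_barrier_depends() calls in ftrace_list_func(). A sketch of
 * the required ordering (writer on the left, concurrent list walker on the
 * right):
 *
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * Without the barrier, a walker that already sees the new ftrace_list
 * pointer could still observe a stale ops->next and walk off the list.
 */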
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic. That is, the callers are recorded at run time
 * by themselves and not at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
static inline void ftrace_release_hash(unsigned long start, unsigned long end)
{
}
#endif
/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
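/*
 * Illustration (not part of the original file): these are bits, so several
 * commands can be batched into a single stop_machine() run. A hypothetical
 * sketch (see ftrace_startup() below for the real usage):
 *
 *	int command = 0;
 *
 *	command |= FTRACE_ENABLE_CALLS;		patch in the call sites
 *	command |= FTRACE_UPDATE_TRACE_FUNC;	and switch the handler
 *	ftrace_run_update_code(command);
 */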
static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
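/*
 * Illustration (not part of the original file): a worked example, assuming
 * 4K pages and, hypothetically, a 16-byte struct dyn_ftrace and a 16-byte
 * struct ftrace_page header:
 *
 *	ENTRIES_PER_PAGE = (4096 - 16) / 16 = 255
 *
 * so NR_TO_INIT (10000) costs roughly 10000 / 255, i.e. about 40 pages,
 * up front. Actual sizes depend on the architecture and config.
 */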
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}
static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}
/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
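/*
 * Illustration (not part of the original file): freed records are threaded
 * into a singly linked free list by reusing the ->ip field as the "next"
 * pointer, so no extra storage is needed. After freeing rec1 and then rec2:
 *
 *	ftrace_free_records -> rec2
 *	rec2->ip            -> rec1
 *	rec1->ip            -> previous list head (initially NULL)
 *
 * ftrace_alloc_dyn_node() pops from this list first and restores ->ip to a
 * real instruction address; FTRACE_FL_FREE guards against double use.
 */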
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));

		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	FTRACE_WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
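/*
 * Illustration (not part of the original file): the per-CPU counter above
 * is a recursion guard, not a lock. If anything called from this function
 * is itself compiled with -pg, the nested mcount hook sees the counter
 * already at 1 and bails out:
 *
 *	mcount -> ftrace_record_ip	counter 0 -> 1, proceeds
 *	  -> ftrace_alloc_dyn_node -> ...
 *	    -> mcount -> ftrace_record_ip
 *					counter 1 -> 2, goto out
 *
 * which is also why only the raw smp_processor_id() is safe here.
 */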
#define FTRACE_ADDR ((long)(ftrace_caller))
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
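/*
 * Illustration (not part of the original file): the filtered-enable rules
 * above, condensed into a truth table (F = FTRACE_FL_FILTER,
 * N = FTRACE_FL_NOTRACE, E = FTRACE_FL_ENABLED):
 *
 *	F N E	action
 *	-----	------
 *	1 0 0	enable the call site
 *	1 0 1	nothing (already enabled)
 *	1 1 0	nothing
 *	1 1 1	disable (notrace wins over filter)
 *	0 0 0	nothing
 *	0 0 1	disable (no longer filtered)
 *	0 1 0	nothing
 *	0 1 1	disable
 *
 * i.e. only filtered-but-disabled records get patched in, and anything
 * enabled that should no longer be traced gets patched out.
 */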
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	switch (ret) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" expected: ", call);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		print_ip_ins(" replace:  ", nop);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}

	if (ret) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_update_code(void *ignore);
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}
static ftrace_func_t saved_ftrace_func;
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e., patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}
static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
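/*
 * Illustration (not part of the original file): how a written pattern maps
 * to a match type above:
 *
 *	"schedule"	MATCH_FULL		str == "schedule"
 *	"sys_*"		MATCH_FRONT_ONLY	str starts with "sys_"
 *	"*_lock"	MATCH_END_ONLY		str ends with "_lock"
 *	"*spin*"	MATCH_MIDDLE_ONLY	str contains "spin"
 *
 * Note the '*' bytes are overwritten with NUL in place, so the buffer
 * passed in must be writable.
 */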
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
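/*
 * Illustration (not part of the original file): a write may carry several
 * whitespace-separated names, consumed one token per call. For example:
 *
 *	# echo 'sys_open sys_close' > set_ftrace_filter
 *
 * returns a short count after "sys_open ", so the caller's write loop comes
 * back for "sys_close". A buffer that ends mid-name just sets
 * FTRACE_ITER_CONT, and the next write (or the final release) completes the
 * token.
 */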
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
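/*
 * Illustration (not part of the original file): a minimal in-kernel usage
 * sketch; the caller and the "schedule" choice are hypothetical:
 *
 *	unsigned char func[] = "schedule";
 *
 *	ftrace_set_filter(func, sizeof(func) - 1, 1);
 *
 * A reset of 1 drops any earlier filters first. The buffer must be
 * writable, since ftrace_match() may patch '*' bytes to NUL in place.
 */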
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
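/*
 * Illustration (not part of the original file): with debugfs mounted, the
 * files created above are used along these lines (the exact tracing
 * directory path varies by kernel version and mount point):
 *
 *	cat available_filter_functions		list traceable functions
 *	cat failures				call sites that failed to patch
 *	echo 'sys_*' > set_ftrace_filter	trace only matching functions
 *	echo schedule > set_ftrace_notrace	never trace these
 *	echo disable > ftraced_enabled		stop the ftraced daemon
 */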
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}
void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */

static void ftrace_release_hash(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;
	unsigned long flags;
	int i, cpu;

	preempt_disable_notrace();

	/* disable in case we call something that calls mcount */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;

	ftrace_hash_lock(flags);

	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(rec, t, n, head, node) {
			if (rec->flags & FTRACE_FL_FREE)
				continue;

			if ((rec->ip >= start) && (rec->ip < end))
				ftrace_free_rec(rec);
		}
	}

	ftrace_hash_unlock(flags);

	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
	preempt_enable_notrace();
}
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				FTRACE_WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is simply disabled and the
 * registered function is cleared, with no clean shutdown.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
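/*
 * Illustration (not part of the original file): a minimal registration
 * sketch for a hypothetical tracer; note the notrace annotation required
 * by the comment above:
 *
 *	static void notrace my_trace_call(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_call,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 */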
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}