/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the
 * moment all CPUs stop calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic. That is, the callers record themselves at
 * run time rather than being recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
static inline void ftrace_release_hash(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
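
/*
 * The FTRACE_* values above are OR'd together into a command bitmask
 * that ftrace_run_update_code() hands to __ftrace_modify_code() under
 * stop_machine(), so several updates can be applied in a single stop.
 */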

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
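
/*
 * Recorded mcount call sites are looked up by hashing the instruction
 * pointer with hash_long(ip, FTRACE_HASHBITS) and chaining dyn_ftrace
 * records off the resulting bucket (see ftrace_add_hash() below).
 */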

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
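
/*
 * As a rough illustration (the exact count depends on the architecture
 * and config): with 4 KiB pages, each page holds on the order of a
 * hundred dyn_ftrace records once the small ftrace_page header is
 * subtracted.
 */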

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

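/*
 * Return 1 if the mcount call at @ip must be skipped: the site is
 * frozen by a kprobe and, given the current enable/filter state, it
 * would not have been patched to call the tracer.
 */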
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	FTRACE_WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
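
		/*
		 * The same rules as a truth table over the three flag
		 * bits tested below (1 = flag set):
		 *
		 *	FILTER NOTRACE ENABLED	action
		 *	  1	 0	 1	nothing (already tracing)
		 *	  1	 1	 0	nothing (notrace wins)
		 *	  0	 0	 0	nothing
		 *	  0	 1	 0	nothing
		 *	  1	 0	 0	enable the call site
		 *	  any other combination	disable the call site
		 */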

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

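/*
 * Patch the mcount call at rec->ip into a NOP. Returns 1 on success
 * and 0 on failure, in which case the record is marked
 * FTRACE_FL_FAILED and a diagnostic describing the mismatch is
 * printed.
 */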
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

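/*
 * ftraced_suspend doubles as a reference count of registered users:
 * the first ftrace_startup() enables the call sites, the last
 * ftrace_shutdown() disables them again.
 */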
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
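
/*
 * Mapping from pattern shape to match type, as parsed by
 * ftrace_match() below:
 *
 *	"func"		MATCH_FULL		exact string compare
 *	"func*"		MATCH_FRONT_ONLY	prefix compare
 *	"*func"		MATCH_END_ONLY		suffix compare
 *	"*func*"	MATCH_MIDDLE_ONLY	substring search
 */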

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

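/*
 * Writes are parsed as whitespace-separated patterns. A token that is
 * cut off at the end of one write is carried over to the next via
 * FTRACE_ITER_CONT and iter->buffer, so userspace may split a pattern
 * across write() calls.
 */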
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
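
/*
 * Illustrative sketch (not part of this file): trace only functions
 * whose names start with "sched_", clearing any previous filter. The
 * pattern chosen is an arbitrary example:
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 */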

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */

static void ftrace_release_hash(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;
	unsigned long flags;
	int i, cpu;

	preempt_disable_notrace();

	/* disable in case we call something that calls mcount */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;

	ftrace_hash_lock(flags);

	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(rec, t, n, head, node) {
			if (rec->flags & FTRACE_FL_FREE)
				continue;

			if ((rec->ip >= start) && (rec->ip < end))
				ftrace_free_rec(rec);
		}
	}

	ftrace_hash_unlock(flags);

	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
	preempt_enable_notrace();
}

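/*
 * The ftraced daemon: wakes up once a second and, unless suspended or
 * stopped, patches any newly recorded mcount call sites (via
 * ftrace_update_code() and stop_machine()) and tops up the record
 * pages for the next round.
 */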
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				FTRACE_WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: everything is disabled immediately,
 * without any synchronization with other CPUs.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
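
/*
 * Illustrative usage sketch (not part of this file; the "my_*" names
 * are hypothetical):
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		...	(must not call anything traceable)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */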

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}