git.proxmox.com: mirror_ubuntu-zesty-kernel.git / kernel/trace/ftrace.c
ftrace: break out modify loop immediately on detection of error
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30
31 #include <asm/ftrace.h>
32
33 #include "trace.h"
34
35 #define FTRACE_WARN_ON(cond) \
36 do { \
37 if (WARN_ON(cond)) \
38 ftrace_kill(); \
39 } while (0)
40
41 #define FTRACE_WARN_ON_ONCE(cond) \
42 do { \
43 if (WARN_ON_ONCE(cond)) \
44 ftrace_kill(); \
45 } while (0)
46
47 /* ftrace_enabled is a method to turn ftrace on or off */
48 int ftrace_enabled __read_mostly;
49 static int last_ftrace_enabled;
50
51 /* set when tracing only a pid */
52 struct pid *ftrace_pid_trace;
53 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
54
55 /* Quick disabling of function tracer. */
56 int function_trace_stop;
57
58 /*
59 * ftrace_disabled is set when an anomaly is discovered.
60 * ftrace_disabled is much stronger than ftrace_enabled.
61 */
62 static int ftrace_disabled __read_mostly;
63
64 static DEFINE_SPINLOCK(ftrace_lock);
65 static DEFINE_MUTEX(ftrace_sysctl_lock);
66 static DEFINE_MUTEX(ftrace_start_lock);
67
68 static struct ftrace_ops ftrace_list_end __read_mostly =
69 {
70 .func = ftrace_stub,
71 };
72
73 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
74 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
75 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
76 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
77
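/* Call every callback on the ftrace_ops list for this function entry. */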
78 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
79 {
80 struct ftrace_ops *op = ftrace_list;
81
82 /* in case someone actually ports this to alpha! */
83 read_barrier_depends();
84
85 while (op != &ftrace_list_end) {
86 /* silly alpha */
87 read_barrier_depends();
88 op->func(ip, parent_ip);
89 op = op->next;
90 }
91 }
92
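/* Only trace tasks that have been flagged for pid tracing. */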
93 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
94 {
95 if (!test_tsk_trace_trace(current))
96 return;
97
98 ftrace_pid_function(ip, parent_ip);
99 }
100
101 static void set_ftrace_pid_function(ftrace_func_t func)
102 {
103 /* do not set ftrace_pid_function to itself! */
104 if (func != ftrace_pid_func)
105 ftrace_pid_function = func;
106 }
107
108 /**
109 * clear_ftrace_function - reset the ftrace function
110 *
111 * This NULLs the ftrace function and in essence stops
112 * tracing. There may be a short lag before all CPUs see the change.
113 */
114 void clear_ftrace_function(void)
115 {
116 ftrace_trace_function = ftrace_stub;
117 __ftrace_trace_function = ftrace_stub;
118 ftrace_pid_function = ftrace_stub;
119 }
120
121 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
122 /*
123 * For those archs that do not test function_trace_stop in their
124 * mcount call site, we need to do it from C.
125 */
126 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
127 {
128 if (function_trace_stop)
129 return;
130
131 __ftrace_trace_function(ip, parent_ip);
132 }
133 #endif
134
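/*
 * Add @ops to the head of ftrace_list and pick the matching
 * trace function (direct call for one user, list walker for more).
 */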
135 static int __register_ftrace_function(struct ftrace_ops *ops)
136 {
137 /* should not be called from interrupt context */
138 spin_lock(&ftrace_lock);
139
140 ops->next = ftrace_list;
141 /*
142 * We are entering ops into the ftrace_list but another
143 * CPU might be walking that list. We need to make sure
144 * the ops->next pointer is valid before another CPU sees
145 * the ops pointer added to the ftrace_list.
146 */
147 smp_wmb();
148 ftrace_list = ops;
149
150 if (ftrace_enabled) {
151 ftrace_func_t func;
152
153 if (ops->next == &ftrace_list_end)
154 func = ops->func;
155 else
156 func = ftrace_list_func;
157
158 if (ftrace_pid_trace) {
159 set_ftrace_pid_function(func);
160 func = ftrace_pid_func;
161 }
162
163 /*
164 * For one func, simply call it directly.
165 * For more than one func, call the chain.
166 */
167 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
168 ftrace_trace_function = func;
169 #else
170 __ftrace_trace_function = func;
171 ftrace_trace_function = ftrace_test_stop_func;
172 #endif
173 }
174
175 spin_unlock(&ftrace_lock);
176
177 return 0;
178 }
179
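/* Remove @ops from ftrace_list; returns -1 if it was never registered. */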
180 static int __unregister_ftrace_function(struct ftrace_ops *ops)
181 {
182 struct ftrace_ops **p;
183 int ret = 0;
184
185 /* should not be called from interrupt context */
186 spin_lock(&ftrace_lock);
187
188 /*
189 * If we are removing the last function, then simply point
190 * to the ftrace_stub.
191 */
192 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
193 ftrace_trace_function = ftrace_stub;
194 ftrace_list = &ftrace_list_end;
195 goto out;
196 }
197
198 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
199 if (*p == ops)
200 break;
201
202 if (*p != ops) {
203 ret = -1;
204 goto out;
205 }
206
207 *p = (*p)->next;
208
209 if (ftrace_enabled) {
210 /* If we only have one func left, then call that directly */
211 if (ftrace_list->next == &ftrace_list_end) {
212 ftrace_func_t func = ftrace_list->func;
213
214 if (ftrace_pid_trace) {
215 set_ftrace_pid_function(func);
216 func = ftrace_pid_func;
217 }
218 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
219 ftrace_trace_function = func;
220 #else
221 __ftrace_trace_function = func;
222 #endif
223 }
224 }
225
226 out:
227 spin_unlock(&ftrace_lock);
228
229 return ret;
230 }
231
232 static void ftrace_update_pid_func(void)
233 {
234 ftrace_func_t func;
235
236 /* should not be called from interrupt context */
237 spin_lock(&ftrace_lock);
238
239 if (ftrace_trace_function == ftrace_stub)
240 goto out;
241
242 func = ftrace_trace_function;
243
244 if (ftrace_pid_trace) {
245 set_ftrace_pid_function(func);
246 func = ftrace_pid_func;
247 } else {
248 if (func == ftrace_pid_func)
249 func = ftrace_pid_function;
250 }
251
252 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
253 ftrace_trace_function = func;
254 #else
255 __ftrace_trace_function = func;
256 #endif
257
258 out:
259 spin_unlock(&ftrace_lock);
260 }
261
262 #ifdef CONFIG_DYNAMIC_FTRACE
263 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
264 # error Dynamic ftrace depends on MCOUNT_RECORD
265 #endif
266
267 /*
268 * Since MCOUNT_ADDR may point to mcount itself, we do not want
269 * it confused with a reference read from the code while we are
270 * parsing the objcopy output of the text section. Use a variable
271 * for it instead.
272 */
273 static unsigned long mcount_addr = MCOUNT_ADDR;
274
275 enum {
276 FTRACE_ENABLE_CALLS = (1 << 0),
277 FTRACE_DISABLE_CALLS = (1 << 1),
278 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
279 FTRACE_ENABLE_MCOUNT = (1 << 3),
280 FTRACE_DISABLE_MCOUNT = (1 << 4),
281 FTRACE_START_FUNC_RET = (1 << 5),
282 FTRACE_STOP_FUNC_RET = (1 << 6),
283 };
284
285 static int ftrace_filtered;
286
287 static LIST_HEAD(ftrace_new_addrs);
288
289 static DEFINE_MUTEX(ftrace_regex_lock);
290
291 struct ftrace_page {
292 struct ftrace_page *next;
293 unsigned long index;
294 struct dyn_ftrace records[];
295 };
296
297 #define ENTRIES_PER_PAGE \
298 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
299
300 /* estimate from running different kernels */
301 #define NR_TO_INIT 10000
302
303 static struct ftrace_page *ftrace_pages_start;
304 static struct ftrace_page *ftrace_pages;
305
306 static struct dyn_ftrace *ftrace_free_records;
307
308
309 #ifdef CONFIG_KPROBES
310
311 static int frozen_record_count;
312
313 static inline void freeze_record(struct dyn_ftrace *rec)
314 {
315 if (!(rec->flags & FTRACE_FL_FROZEN)) {
316 rec->flags |= FTRACE_FL_FROZEN;
317 frozen_record_count++;
318 }
319 }
320
321 static inline void unfreeze_record(struct dyn_ftrace *rec)
322 {
323 if (rec->flags & FTRACE_FL_FROZEN) {
324 rec->flags &= ~FTRACE_FL_FROZEN;
325 frozen_record_count--;
326 }
327 }
328
329 static inline int record_frozen(struct dyn_ftrace *rec)
330 {
331 return rec->flags & FTRACE_FL_FROZEN;
332 }
333 #else
334 # define freeze_record(rec) ({ 0; })
335 # define unfreeze_record(rec) ({ 0; })
336 # define record_frozen(rec) ({ 0; })
337 #endif /* CONFIG_KPROBES */
338
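/* Put a record back on the free list, threaded through rec->ip. */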
339 static void ftrace_free_rec(struct dyn_ftrace *rec)
340 {
341 rec->ip = (unsigned long)ftrace_free_records;
342 ftrace_free_records = rec;
343 rec->flags |= FTRACE_FL_FREE;
344 }
345
346 void ftrace_release(void *start, unsigned long size)
347 {
348 struct dyn_ftrace *rec;
349 struct ftrace_page *pg;
350 unsigned long s = (unsigned long)start;
351 unsigned long e = s + size;
352 int i;
353
354 if (ftrace_disabled || !start)
355 return;
356
357 /* should not be called from interrupt context */
358 spin_lock(&ftrace_lock);
359
360 for (pg = ftrace_pages_start; pg; pg = pg->next) {
361 for (i = 0; i < pg->index; i++) {
362 rec = &pg->records[i];
363
364 if ((rec->ip >= s) && (rec->ip < e))
365 ftrace_free_rec(rec);
366 }
367 }
368 spin_unlock(&ftrace_lock);
369 }
370
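/*
 * Hand out a dyn_ftrace record: reuse a freed one if available, otherwise
 * take the next slot in the current page, allocating a new page when full.
 */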
371 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
372 {
373 struct dyn_ftrace *rec;
374
375 /* First check for freed records */
376 if (ftrace_free_records) {
377 rec = ftrace_free_records;
378
379 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
380 FTRACE_WARN_ON_ONCE(1);
381 ftrace_free_records = NULL;
382 return NULL;
383 }
384
385 ftrace_free_records = (void *)rec->ip;
386 memset(rec, 0, sizeof(*rec));
387 return rec;
388 }
389
390 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
391 if (!ftrace_pages->next) {
392 /* allocate another page */
393 ftrace_pages->next =
394 (void *)get_zeroed_page(GFP_KERNEL);
395 if (!ftrace_pages->next)
396 return NULL;
397 }
398 ftrace_pages = ftrace_pages->next;
399 }
400
401 return &ftrace_pages->records[ftrace_pages->index++];
402 }
403
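/* Record a new mcount call site and queue it for later conversion. */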
404 static struct dyn_ftrace *
405 ftrace_record_ip(unsigned long ip)
406 {
407 struct dyn_ftrace *rec;
408
409 if (ftrace_disabled)
410 return NULL;
411
412 rec = ftrace_alloc_dyn_node(ip);
413 if (!rec)
414 return NULL;
415
416 rec->ip = ip;
417
418 list_add(&rec->list, &ftrace_new_addrs);
419
420 return rec;
421 }
422
423 static void print_ip_ins(const char *fmt, unsigned char *p)
424 {
425 int i;
426
427 printk(KERN_CONT "%s", fmt);
428
429 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
430 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
431 }
432
433 static void ftrace_bug(int failed, unsigned long ip)
434 {
435 switch (failed) {
436 case -EFAULT:
437 FTRACE_WARN_ON_ONCE(1);
438 pr_info("ftrace faulted on modifying ");
439 print_ip_sym(ip);
440 break;
441 case -EINVAL:
442 FTRACE_WARN_ON_ONCE(1);
443 pr_info("ftrace failed to modify ");
444 print_ip_sym(ip);
445 print_ip_ins(" actual: ", (unsigned char *)ip);
446 printk(KERN_CONT "\n");
447 break;
448 case -EPERM:
449 FTRACE_WARN_ON_ONCE(1);
450 pr_info("ftrace faulted on writing ");
451 print_ip_sym(ip);
452 break;
453 default:
454 FTRACE_WARN_ON_ONCE(1);
455 pr_info("ftrace faulted on unknown error ");
456 print_ip_sym(ip);
457 }
458 }
459
460
461 static int
462 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
463 {
464 unsigned long ip, fl;
465 unsigned long ftrace_addr;
466
467 ftrace_addr = (unsigned long)ftrace_caller;
468
469 ip = rec->ip;
470
471 /*
472 * If this record is not to be traced and
473 * it is not enabled then do nothing.
474 *
475 * If this record is not to be traced and
476 * it is enabled then disable it.
477 *
478 */
479 if (rec->flags & FTRACE_FL_NOTRACE) {
480 if (rec->flags & FTRACE_FL_ENABLED)
481 rec->flags &= ~FTRACE_FL_ENABLED;
482 else
483 return 0;
484
485 } else if (ftrace_filtered && enable) {
486 /*
487 * Filtering is on:
488 */
489
490 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
491
492 /* Record is filtered and enabled, do nothing */
493 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
494 return 0;
495
496 /* Record is not filtered and is not enabled, do nothing */
497 if (!fl)
498 return 0;
499
500 /* Record is not filtered but enabled, disable it */
501 if (fl == FTRACE_FL_ENABLED)
502 rec->flags &= ~FTRACE_FL_ENABLED;
503 else
504 /* Otherwise record is filtered but not enabled, enable it */
505 rec->flags |= FTRACE_FL_ENABLED;
506 } else {
507 /* Disable or not filtered */
508
509 if (enable) {
510 /* if record is enabled, do nothing */
511 if (rec->flags & FTRACE_FL_ENABLED)
512 return 0;
513
514 rec->flags |= FTRACE_FL_ENABLED;
515
516 } else {
517
518 /* if record is not enabled do nothing */
519 if (!(rec->flags & FTRACE_FL_ENABLED))
520 return 0;
521
522 rec->flags &= ~FTRACE_FL_ENABLED;
523 }
524 }
525
526 if (rec->flags & FTRACE_FL_ENABLED)
527 return ftrace_make_call(rec, ftrace_addr);
528 else
529 return ftrace_make_nop(NULL, rec, ftrace_addr);
530 }
531
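/*
 * Walk every known mcount site and enable or disable its call,
 * skipping free, failed and kprobe'd records.
 */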
532 static void ftrace_replace_code(int enable)
533 {
534 int i, failed;
535 struct dyn_ftrace *rec;
536 struct ftrace_page *pg;
537
538 for (pg = ftrace_pages_start; pg; pg = pg->next) {
539 for (i = 0; i < pg->index; i++) {
540 rec = &pg->records[i];
541
542 /*
543 * Skip over free records and records that have
544 * failed.
545 */
546 if (rec->flags & FTRACE_FL_FREE ||
547 rec->flags & FTRACE_FL_FAILED)
548 continue;
549
550 /* ignore updates to this record's mcount site */
551 if (get_kprobe((void *)rec->ip)) {
552 freeze_record(rec);
553 continue;
554 } else {
555 unfreeze_record(rec);
556 }
557
558 failed = __ftrace_replace_code(rec, enable);
559 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
560 rec->flags |= FTRACE_FL_FAILED;
561 if ((system_state == SYSTEM_BOOTING) ||
562 !core_kernel_text(rec->ip)) {
563 ftrace_free_rec(rec);
564 } else {
565 ftrace_bug(failed, rec->ip);
566 /* Stop processing */
567 return;
568 }
569 }
570 }
571 }
572 }
573
574 static int
575 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
576 {
577 unsigned long ip;
578 int ret;
579
580 ip = rec->ip;
581
582 ret = ftrace_make_nop(mod, rec, mcount_addr);
583 if (ret) {
584 ftrace_bug(ret, ip);
585 rec->flags |= FTRACE_FL_FAILED;
586 return 0;
587 }
588 return 1;
589 }
590
591 /*
592 * archs can override this function if they must do something
593 * before the code modification is performed.
594 */
595 int __weak ftrace_arch_code_modify_prepare(void)
596 {
597 return 0;
598 }
599
600 /*
601 * archs can override this function if they must do something
602 * after the modifying code is performed.
603 */
604 int __weak ftrace_arch_code_modify_post_process(void)
605 {
606 return 0;
607 }
608
609 static int __ftrace_modify_code(void *data)
610 {
611 int *command = data;
612
613 if (*command & FTRACE_ENABLE_CALLS)
614 ftrace_replace_code(1);
615 else if (*command & FTRACE_DISABLE_CALLS)
616 ftrace_replace_code(0);
617
618 if (*command & FTRACE_UPDATE_TRACE_FUNC)
619 ftrace_update_ftrace_func(ftrace_trace_function);
620
621 if (*command & FTRACE_START_FUNC_RET)
622 ftrace_enable_ftrace_graph_caller();
623 else if (*command & FTRACE_STOP_FUNC_RET)
624 ftrace_disable_ftrace_graph_caller();
625
626 return 0;
627 }
628
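/*
 * Run __ftrace_modify_code() under stop_machine(), bracketed by the
 * arch prepare/post hooks.
 */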
629 static void ftrace_run_update_code(int command)
630 {
631 int ret;
632
633 ret = ftrace_arch_code_modify_prepare();
634 FTRACE_WARN_ON(ret);
635 if (ret)
636 return;
637
638 stop_machine(__ftrace_modify_code, &command, NULL);
639
640 ret = ftrace_arch_code_modify_post_process();
641 FTRACE_WARN_ON(ret);
642 }
643
644 static ftrace_func_t saved_ftrace_func;
645 static int ftrace_start_up;
646
647 static void ftrace_startup_enable(int command)
648 {
649 if (saved_ftrace_func != ftrace_trace_function) {
650 saved_ftrace_func = ftrace_trace_function;
651 command |= FTRACE_UPDATE_TRACE_FUNC;
652 }
653
654 if (!command || !ftrace_enabled)
655 return;
656
657 ftrace_run_update_code(command);
658 }
659
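/* Account a new ftrace user and make sure the call sites are enabled. */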
660 static void ftrace_startup(int command)
661 {
662 if (unlikely(ftrace_disabled))
663 return;
664
665 mutex_lock(&ftrace_start_lock);
666 ftrace_start_up++;
667 command |= FTRACE_ENABLE_CALLS;
668
669 ftrace_startup_enable(command);
670
671 mutex_unlock(&ftrace_start_lock);
672 }
673
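/* Drop an ftrace user; disable the call sites when the last one is gone. */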
674 static void ftrace_shutdown(int command)
675 {
676 if (unlikely(ftrace_disabled))
677 return;
678
679 mutex_lock(&ftrace_start_lock);
680 ftrace_start_up--;
681 if (!ftrace_start_up)
682 command |= FTRACE_DISABLE_CALLS;
683
684 if (saved_ftrace_func != ftrace_trace_function) {
685 saved_ftrace_func = ftrace_trace_function;
686 command |= FTRACE_UPDATE_TRACE_FUNC;
687 }
688
689 if (!command || !ftrace_enabled)
690 goto out;
691
692 ftrace_run_update_code(command);
693 out:
694 mutex_unlock(&ftrace_start_lock);
695 }
696
697 static void ftrace_startup_sysctl(void)
698 {
699 int command = FTRACE_ENABLE_MCOUNT;
700
701 if (unlikely(ftrace_disabled))
702 return;
703
704 mutex_lock(&ftrace_start_lock);
705 /* Force update next time */
706 saved_ftrace_func = NULL;
707 /* ftrace_start_up is true if we want ftrace running */
708 if (ftrace_start_up)
709 command |= FTRACE_ENABLE_CALLS;
710
711 ftrace_run_update_code(command);
712 mutex_unlock(&ftrace_start_lock);
713 }
714
715 static void ftrace_shutdown_sysctl(void)
716 {
717 int command = FTRACE_DISABLE_MCOUNT;
718
719 if (unlikely(ftrace_disabled))
720 return;
721
722 mutex_lock(&ftrace_start_lock);
723 /* ftrace_start_up is true if ftrace is running */
724 if (ftrace_start_up)
725 command |= FTRACE_DISABLE_CALLS;
726
727 ftrace_run_update_code(command);
728 mutex_unlock(&ftrace_start_lock);
729 }
730
731 static cycle_t ftrace_update_time;
732 static unsigned long ftrace_update_cnt;
733 unsigned long ftrace_update_tot_cnt;
734
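/*
 * Convert every newly recorded call site to a NOP and account
 * how long the update took.
 */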
735 static int ftrace_update_code(struct module *mod)
736 {
737 struct dyn_ftrace *p, *t;
738 cycle_t start, stop;
739
740 start = ftrace_now(raw_smp_processor_id());
741 ftrace_update_cnt = 0;
742
743 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
744
745 /* If something went wrong, bail without enabling anything */
746 if (unlikely(ftrace_disabled))
747 return -1;
748
749 list_del_init(&p->list);
750
751 /* convert record (i.e., patch the mcount call with a NOP) */
752 if (ftrace_code_disable(mod, p)) {
753 p->flags |= FTRACE_FL_CONVERTED;
754 ftrace_update_cnt++;
755 } else
756 ftrace_free_rec(p);
757 }
758
759 stop = ftrace_now(raw_smp_processor_id());
760 ftrace_update_time = stop - start;
761 ftrace_update_tot_cnt += ftrace_update_cnt;
762
763 return 0;
764 }
765
766 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
767 {
768 struct ftrace_page *pg;
769 int cnt;
770 int i;
771
772 /* allocate a few pages */
773 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
774 if (!ftrace_pages_start)
775 return -1;
776
777 /*
778 * Allocate a few more pages.
779 *
780 * TODO: have some parser search vmlinux before
781 * final linking to find all calls to ftrace.
782 * Then we can:
783 * a) know how many pages to allocate.
784 * and/or
785 * b) set up the table then.
786 *
787 * The dynamic code is still necessary for
788 * modules.
789 */
790
791 pg = ftrace_pages = ftrace_pages_start;
792
793 cnt = num_to_init / ENTRIES_PER_PAGE;
794 pr_info("ftrace: allocating %ld entries in %d pages\n",
795 num_to_init, cnt + 1);
796
797 for (i = 0; i < cnt; i++) {
798 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
799
800 /* If we fail, we'll try later anyway */
801 if (!pg->next)
802 break;
803
804 pg = pg->next;
805 }
806
807 return 0;
808 }
809
810 enum {
811 FTRACE_ITER_FILTER = (1 << 0),
812 FTRACE_ITER_CONT = (1 << 1),
813 FTRACE_ITER_NOTRACE = (1 << 2),
814 FTRACE_ITER_FAILURES = (1 << 3),
815 };
816
817 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
818
819 struct ftrace_iterator {
820 struct ftrace_page *pg;
821 unsigned idx;
822 unsigned flags;
823 unsigned char buffer[FTRACE_BUFF_MAX+1];
824 unsigned buffer_idx;
825 unsigned filtered;
826 };
827
828 static void *
829 t_next(struct seq_file *m, void *v, loff_t *pos)
830 {
831 struct ftrace_iterator *iter = m->private;
832 struct dyn_ftrace *rec = NULL;
833
834 (*pos)++;
835
836 /* should not be called from interrupt context */
837 spin_lock(&ftrace_lock);
838 retry:
839 if (iter->idx >= iter->pg->index) {
840 if (iter->pg->next) {
841 iter->pg = iter->pg->next;
842 iter->idx = 0;
843 goto retry;
844 } else {
845 iter->idx = -1;
846 }
847 } else {
848 rec = &iter->pg->records[iter->idx++];
849 if ((rec->flags & FTRACE_FL_FREE) ||
850
851 (!(iter->flags & FTRACE_ITER_FAILURES) &&
852 (rec->flags & FTRACE_FL_FAILED)) ||
853
854 ((iter->flags & FTRACE_ITER_FAILURES) &&
855 !(rec->flags & FTRACE_FL_FAILED)) ||
856
857 ((iter->flags & FTRACE_ITER_FILTER) &&
858 !(rec->flags & FTRACE_FL_FILTER)) ||
859
860 ((iter->flags & FTRACE_ITER_NOTRACE) &&
861 !(rec->flags & FTRACE_FL_NOTRACE))) {
862 rec = NULL;
863 goto retry;
864 }
865 }
866 spin_unlock(&ftrace_lock);
867
868 return rec;
869 }
870
871 static void *t_start(struct seq_file *m, loff_t *pos)
872 {
873 struct ftrace_iterator *iter = m->private;
874 void *p = NULL;
875
876 if (*pos > 0) {
877 if (iter->idx < 0)
878 return p;
879 (*pos)--;
880 iter->idx--;
881 }
882
883 p = t_next(m, p, pos);
884
885 return p;
886 }
887
888 static void t_stop(struct seq_file *m, void *p)
889 {
890 }
891
892 static int t_show(struct seq_file *m, void *v)
893 {
894 struct dyn_ftrace *rec = v;
895 char str[KSYM_SYMBOL_LEN];
896
897 if (!rec)
898 return 0;
899
900 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
901
902 seq_printf(m, "%s\n", str);
903
904 return 0;
905 }
906
907 static struct seq_operations show_ftrace_seq_ops = {
908 .start = t_start,
909 .next = t_next,
910 .stop = t_stop,
911 .show = t_show,
912 };
913
914 static int
915 ftrace_avail_open(struct inode *inode, struct file *file)
916 {
917 struct ftrace_iterator *iter;
918 int ret;
919
920 if (unlikely(ftrace_disabled))
921 return -ENODEV;
922
923 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
924 if (!iter)
925 return -ENOMEM;
926
927 iter->pg = ftrace_pages_start;
928
929 ret = seq_open(file, &show_ftrace_seq_ops);
930 if (!ret) {
931 struct seq_file *m = file->private_data;
932
933 m->private = iter;
934 } else {
935 kfree(iter);
936 }
937
938 return ret;
939 }
940
941 int ftrace_avail_release(struct inode *inode, struct file *file)
942 {
943 struct seq_file *m = (struct seq_file *)file->private_data;
944 struct ftrace_iterator *iter = m->private;
945
946 seq_release(inode, file);
947 kfree(iter);
948
949 return 0;
950 }
951
952 static int
953 ftrace_failures_open(struct inode *inode, struct file *file)
954 {
955 int ret;
956 struct seq_file *m;
957 struct ftrace_iterator *iter;
958
959 ret = ftrace_avail_open(inode, file);
960 if (!ret) {
961 m = (struct seq_file *)file->private_data;
962 iter = (struct ftrace_iterator *)m->private;
963 iter->flags = FTRACE_ITER_FAILURES;
964 }
965
966 return ret;
967 }
968
969
970 static void ftrace_filter_reset(int enable)
971 {
972 struct ftrace_page *pg;
973 struct dyn_ftrace *rec;
974 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
975 unsigned i;
976
977 /* should not be called from interrupt context */
978 spin_lock(&ftrace_lock);
979 if (enable)
980 ftrace_filtered = 0;
981 pg = ftrace_pages_start;
982 while (pg) {
983 for (i = 0; i < pg->index; i++) {
984 rec = &pg->records[i];
985 if (rec->flags & FTRACE_FL_FAILED)
986 continue;
987 rec->flags &= ~type;
988 }
989 pg = pg->next;
990 }
991 spin_unlock(&ftrace_lock);
992 }
993
994 static int
995 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
996 {
997 struct ftrace_iterator *iter;
998 int ret = 0;
999
1000 if (unlikely(ftrace_disabled))
1001 return -ENODEV;
1002
1003 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1004 if (!iter)
1005 return -ENOMEM;
1006
1007 mutex_lock(&ftrace_regex_lock);
1008 if ((file->f_mode & FMODE_WRITE) &&
1009 !(file->f_flags & O_APPEND))
1010 ftrace_filter_reset(enable);
1011
1012 if (file->f_mode & FMODE_READ) {
1013 iter->pg = ftrace_pages_start;
1014 iter->flags = enable ? FTRACE_ITER_FILTER :
1015 FTRACE_ITER_NOTRACE;
1016
1017 ret = seq_open(file, &show_ftrace_seq_ops);
1018 if (!ret) {
1019 struct seq_file *m = file->private_data;
1020 m->private = iter;
1021 } else
1022 kfree(iter);
1023 } else
1024 file->private_data = iter;
1025 mutex_unlock(&ftrace_regex_lock);
1026
1027 return ret;
1028 }
1029
1030 static int
1031 ftrace_filter_open(struct inode *inode, struct file *file)
1032 {
1033 return ftrace_regex_open(inode, file, 1);
1034 }
1035
1036 static int
1037 ftrace_notrace_open(struct inode *inode, struct file *file)
1038 {
1039 return ftrace_regex_open(inode, file, 0);
1040 }
1041
1042 static ssize_t
1043 ftrace_regex_read(struct file *file, char __user *ubuf,
1044 size_t cnt, loff_t *ppos)
1045 {
1046 if (file->f_mode & FMODE_READ)
1047 return seq_read(file, ubuf, cnt, ppos);
1048 else
1049 return -EPERM;
1050 }
1051
1052 static loff_t
1053 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1054 {
1055 loff_t ret;
1056
1057 if (file->f_mode & FMODE_READ)
1058 ret = seq_lseek(file, offset, origin);
1059 else
1060 file->f_pos = ret = 1;
1061
1062 return ret;
1063 }
1064
1065 enum {
1066 MATCH_FULL,
1067 MATCH_FRONT_ONLY,
1068 MATCH_MIDDLE_ONLY,
1069 MATCH_END_ONLY,
1070 };
1071
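/*
 * Parse a glob pattern (leading '!' negates, '*' matches front, middle
 * or end) and set or clear the filter/notrace flag on matching records.
 */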
1072 static void
1073 ftrace_match(unsigned char *buff, int len, int enable)
1074 {
1075 char str[KSYM_SYMBOL_LEN];
1076 char *search = NULL;
1077 struct ftrace_page *pg;
1078 struct dyn_ftrace *rec;
1079 int type = MATCH_FULL;
1080 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1081 unsigned i, match = 0, search_len = 0;
1082 int not = 0;
1083
1084 if (buff[0] == '!') {
1085 not = 1;
1086 buff++;
1087 len--;
1088 }
1089
1090 for (i = 0; i < len; i++) {
1091 if (buff[i] == '*') {
1092 if (!i) {
1093 search = buff + i + 1;
1094 type = MATCH_END_ONLY;
1095 search_len = len - (i + 1);
1096 } else {
1097 if (type == MATCH_END_ONLY) {
1098 type = MATCH_MIDDLE_ONLY;
1099 } else {
1100 match = i;
1101 type = MATCH_FRONT_ONLY;
1102 }
1103 buff[i] = 0;
1104 break;
1105 }
1106 }
1107 }
1108
1109 /* should not be called from interrupt context */
1110 spin_lock(&ftrace_lock);
1111 if (enable)
1112 ftrace_filtered = 1;
1113 pg = ftrace_pages_start;
1114 while (pg) {
1115 for (i = 0; i < pg->index; i++) {
1116 int matched = 0;
1117 char *ptr;
1118
1119 rec = &pg->records[i];
1120 if (rec->flags & FTRACE_FL_FAILED)
1121 continue;
1122 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1123 switch (type) {
1124 case MATCH_FULL:
1125 if (strcmp(str, buff) == 0)
1126 matched = 1;
1127 break;
1128 case MATCH_FRONT_ONLY:
1129 if (memcmp(str, buff, match) == 0)
1130 matched = 1;
1131 break;
1132 case MATCH_MIDDLE_ONLY:
1133 if (strstr(str, search))
1134 matched = 1;
1135 break;
1136 case MATCH_END_ONLY:
1137 ptr = strstr(str, search);
1138 if (ptr && (ptr[search_len] == 0))
1139 matched = 1;
1140 break;
1141 }
1142 if (matched) {
1143 if (not)
1144 rec->flags &= ~flag;
1145 else
1146 rec->flags |= flag;
1147 }
1148 }
1149 pg = pg->next;
1150 }
1151 spin_unlock(&ftrace_lock);
1152 }
1153
1154 static ssize_t
1155 ftrace_regex_write(struct file *file, const char __user *ubuf,
1156 size_t cnt, loff_t *ppos, int enable)
1157 {
1158 struct ftrace_iterator *iter;
1159 char ch;
1160 size_t read = 0;
1161 ssize_t ret;
1162
1163 if (!cnt || cnt < 0)
1164 return 0;
1165
1166 mutex_lock(&ftrace_regex_lock);
1167
1168 if (file->f_mode & FMODE_READ) {
1169 struct seq_file *m = file->private_data;
1170 iter = m->private;
1171 } else
1172 iter = file->private_data;
1173
1174 if (!*ppos) {
1175 iter->flags &= ~FTRACE_ITER_CONT;
1176 iter->buffer_idx = 0;
1177 }
1178
1179 ret = get_user(ch, ubuf++);
1180 if (ret)
1181 goto out;
1182 read++;
1183 cnt--;
1184
1185 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1186 /* skip white space */
1187 while (cnt && isspace(ch)) {
1188 ret = get_user(ch, ubuf++);
1189 if (ret)
1190 goto out;
1191 read++;
1192 cnt--;
1193 }
1194
1195 if (isspace(ch)) {
1196 file->f_pos += read;
1197 ret = read;
1198 goto out;
1199 }
1200
1201 iter->buffer_idx = 0;
1202 }
1203
1204 while (cnt && !isspace(ch)) {
1205 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1206 iter->buffer[iter->buffer_idx++] = ch;
1207 else {
1208 ret = -EINVAL;
1209 goto out;
1210 }
1211 ret = get_user(ch, ubuf++);
1212 if (ret)
1213 goto out;
1214 read++;
1215 cnt--;
1216 }
1217
1218 if (isspace(ch)) {
1219 iter->filtered++;
1220 iter->buffer[iter->buffer_idx] = 0;
1221 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1222 iter->buffer_idx = 0;
1223 } else
1224 iter->flags |= FTRACE_ITER_CONT;
1225
1226
1227 file->f_pos += read;
1228
1229 ret = read;
1230 out:
1231 mutex_unlock(&ftrace_regex_lock);
1232
1233 return ret;
1234 }
1235
1236 static ssize_t
1237 ftrace_filter_write(struct file *file, const char __user *ubuf,
1238 size_t cnt, loff_t *ppos)
1239 {
1240 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1241 }
1242
1243 static ssize_t
1244 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1245 size_t cnt, loff_t *ppos)
1246 {
1247 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1248 }
1249
1250 static void
1251 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1252 {
1253 if (unlikely(ftrace_disabled))
1254 return;
1255
1256 mutex_lock(&ftrace_regex_lock);
1257 if (reset)
1258 ftrace_filter_reset(enable);
1259 if (buf)
1260 ftrace_match(buf, len, enable);
1261 mutex_unlock(&ftrace_regex_lock);
1262 }
1263
1264 /**
1265 * ftrace_set_filter - set a function to filter on in ftrace
1266 * @buf - the string that holds the function filter text.
1267 * @len - the length of the string.
1268 * @reset - non zero to reset all filters before applying this filter.
1269 *
1270 * Filters denote which functions should be enabled when tracing is enabled.
1271 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1272 */
1273 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1274 {
1275 ftrace_set_regex(buf, len, reset, 1);
1276 }
1277
1278 /**
1279 * ftrace_set_notrace - set a function to not trace in ftrace
1280 * @buf - the string that holds the function notrace text.
1281 * @len - the length of the string.
1282 * @reset - non zero to reset all filters before applying this filter.
1283 *
1284 * Notrace Filters denote which functions should not be enabled when tracing
1285 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1286 * for tracing.
1287 */
1288 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1289 {
1290 ftrace_set_regex(buf, len, reset, 0);
1291 }
1292
1293 static int
1294 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1295 {
1296 struct seq_file *m = (struct seq_file *)file->private_data;
1297 struct ftrace_iterator *iter;
1298
1299 mutex_lock(&ftrace_regex_lock);
1300 if (file->f_mode & FMODE_READ) {
1301 iter = m->private;
1302
1303 seq_release(inode, file);
1304 } else
1305 iter = file->private_data;
1306
1307 if (iter->buffer_idx) {
1308 iter->filtered++;
1309 iter->buffer[iter->buffer_idx] = 0;
1310 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1311 }
1312
1313 mutex_lock(&ftrace_sysctl_lock);
1314 mutex_lock(&ftrace_start_lock);
1315 if (ftrace_start_up && ftrace_enabled)
1316 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1317 mutex_unlock(&ftrace_start_lock);
1318 mutex_unlock(&ftrace_sysctl_lock);
1319
1320 kfree(iter);
1321 mutex_unlock(&ftrace_regex_lock);
1322 return 0;
1323 }
1324
1325 static int
1326 ftrace_filter_release(struct inode *inode, struct file *file)
1327 {
1328 return ftrace_regex_release(inode, file, 1);
1329 }
1330
1331 static int
1332 ftrace_notrace_release(struct inode *inode, struct file *file)
1333 {
1334 return ftrace_regex_release(inode, file, 0);
1335 }
1336
1337 static struct file_operations ftrace_avail_fops = {
1338 .open = ftrace_avail_open,
1339 .read = seq_read,
1340 .llseek = seq_lseek,
1341 .release = ftrace_avail_release,
1342 };
1343
1344 static struct file_operations ftrace_failures_fops = {
1345 .open = ftrace_failures_open,
1346 .read = seq_read,
1347 .llseek = seq_lseek,
1348 .release = ftrace_avail_release,
1349 };
1350
1351 static struct file_operations ftrace_filter_fops = {
1352 .open = ftrace_filter_open,
1353 .read = ftrace_regex_read,
1354 .write = ftrace_filter_write,
1355 .llseek = ftrace_regex_lseek,
1356 .release = ftrace_filter_release,
1357 };
1358
1359 static struct file_operations ftrace_notrace_fops = {
1360 .open = ftrace_notrace_open,
1361 .read = ftrace_regex_read,
1362 .write = ftrace_notrace_write,
1363 .llseek = ftrace_regex_lseek,
1364 .release = ftrace_notrace_release,
1365 };
1366
1367 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1368
1369 static DEFINE_MUTEX(graph_lock);
1370
1371 int ftrace_graph_count;
1372 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1373
1374 static void *
1375 g_next(struct seq_file *m, void *v, loff_t *pos)
1376 {
1377 unsigned long *array = m->private;
1378 int index = *pos;
1379
1380 (*pos)++;
1381
1382 if (index >= ftrace_graph_count)
1383 return NULL;
1384
1385 return &array[index];
1386 }
1387
1388 static void *g_start(struct seq_file *m, loff_t *pos)
1389 {
1390 void *p = NULL;
1391
1392 mutex_lock(&graph_lock);
1393
1394 p = g_next(m, p, pos);
1395
1396 return p;
1397 }
1398
1399 static void g_stop(struct seq_file *m, void *p)
1400 {
1401 mutex_unlock(&graph_lock);
1402 }
1403
1404 static int g_show(struct seq_file *m, void *v)
1405 {
1406 unsigned long *ptr = v;
1407 char str[KSYM_SYMBOL_LEN];
1408
1409 if (!ptr)
1410 return 0;
1411
1412 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1413
1414 seq_printf(m, "%s\n", str);
1415
1416 return 0;
1417 }
1418
1419 static struct seq_operations ftrace_graph_seq_ops = {
1420 .start = g_start,
1421 .next = g_next,
1422 .stop = g_stop,
1423 .show = g_show,
1424 };
1425
1426 static int
1427 ftrace_graph_open(struct inode *inode, struct file *file)
1428 {
1429 int ret = 0;
1430
1431 if (unlikely(ftrace_disabled))
1432 return -ENODEV;
1433
1434 mutex_lock(&graph_lock);
1435 if ((file->f_mode & FMODE_WRITE) &&
1436 !(file->f_flags & O_APPEND)) {
1437 ftrace_graph_count = 0;
1438 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1439 }
1440
1441 if (file->f_mode & FMODE_READ) {
1442 ret = seq_open(file, &ftrace_graph_seq_ops);
1443 if (!ret) {
1444 struct seq_file *m = file->private_data;
1445 m->private = ftrace_graph_funcs;
1446 }
1447 } else
1448 file->private_data = ftrace_graph_funcs;
1449 mutex_unlock(&graph_lock);
1450
1451 return ret;
1452 }
1453
1454 static ssize_t
1455 ftrace_graph_read(struct file *file, char __user *ubuf,
1456 size_t cnt, loff_t *ppos)
1457 {
1458 if (file->f_mode & FMODE_READ)
1459 return seq_read(file, ubuf, cnt, ppos);
1460 else
1461 return -EPERM;
1462 }
1463
1464 static int
1465 ftrace_set_func(unsigned long *array, int idx, char *buffer)
1466 {
1467 char str[KSYM_SYMBOL_LEN];
1468 struct dyn_ftrace *rec;
1469 struct ftrace_page *pg;
1470 int found = 0;
1471 int i, j;
1472
1473 if (ftrace_disabled)
1474 return -ENODEV;
1475
1476 /* should not be called from interrupt context */
1477 spin_lock(&ftrace_lock);
1478
1479 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1480 for (i = 0; i < pg->index; i++) {
1481 rec = &pg->records[i];
1482
1483 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1484 continue;
1485
1486 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1487 if (strcmp(str, buffer) == 0) {
1488 found = 1;
1489 for (j = 0; j < idx; j++)
1490 if (array[j] == rec->ip) {
1491 found = 0;
1492 break;
1493 }
1494 if (found)
1495 array[idx] = rec->ip;
1496 break;
1497 }
1498 }
1499 }
1500 spin_unlock(&ftrace_lock);
1501
1502 return found ? 0 : -EINVAL;
1503 }
1504
1505 static ssize_t
1506 ftrace_graph_write(struct file *file, const char __user *ubuf,
1507 size_t cnt, loff_t *ppos)
1508 {
1509 unsigned char buffer[FTRACE_BUFF_MAX+1];
1510 unsigned long *array;
1511 size_t read = 0;
1512 ssize_t ret;
1513 int index = 0;
1514 char ch;
1515
1516 if (!cnt || cnt < 0)
1517 return 0;
1518
1519 mutex_lock(&graph_lock);
1520
1521 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1522 ret = -EBUSY;
1523 goto out;
1524 }
1525
1526 if (file->f_mode & FMODE_READ) {
1527 struct seq_file *m = file->private_data;
1528 array = m->private;
1529 } else
1530 array = file->private_data;
1531
1532 ret = get_user(ch, ubuf++);
1533 if (ret)
1534 goto out;
1535 read++;
1536 cnt--;
1537
1538 /* skip white space */
1539 while (cnt && isspace(ch)) {
1540 ret = get_user(ch, ubuf++);
1541 if (ret)
1542 goto out;
1543 read++;
1544 cnt--;
1545 }
1546
1547 if (isspace(ch)) {
1548 *ppos += read;
1549 ret = read;
1550 goto out;
1551 }
1552
1553 while (cnt && !isspace(ch)) {
1554 if (index < FTRACE_BUFF_MAX)
1555 buffer[index++] = ch;
1556 else {
1557 ret = -EINVAL;
1558 goto out;
1559 }
1560 ret = get_user(ch, ubuf++);
1561 if (ret)
1562 goto out;
1563 read++;
1564 cnt--;
1565 }
1566 buffer[index] = 0;
1567
1568 /* we allow only one at a time */
1569 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1570 if (ret)
1571 goto out;
1572
1573 ftrace_graph_count++;
1574
1575 file->f_pos += read;
1576
1577 ret = read;
1578 out:
1579 mutex_unlock(&graph_lock);
1580
1581 return ret;
1582 }
1583
1584 static const struct file_operations ftrace_graph_fops = {
1585 .open = ftrace_graph_open,
1586 .read = ftrace_graph_read,
1587 .write = ftrace_graph_write,
1588 };
1589 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1590
1591 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1592 {
1593 struct dentry *entry;
1594
1595 entry = debugfs_create_file("available_filter_functions", 0444,
1596 d_tracer, NULL, &ftrace_avail_fops);
1597 if (!entry)
1598 pr_warning("Could not create debugfs "
1599 "'available_filter_functions' entry\n");
1600
1601 entry = debugfs_create_file("failures", 0444,
1602 d_tracer, NULL, &ftrace_failures_fops);
1603 if (!entry)
1604 pr_warning("Could not create debugfs 'failures' entry\n");
1605
1606 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1607 NULL, &ftrace_filter_fops);
1608 if (!entry)
1609 pr_warning("Could not create debugfs "
1610 "'set_ftrace_filter' entry\n");
1611
1612 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1613 NULL, &ftrace_notrace_fops);
1614 if (!entry)
1615 pr_warning("Could not create debugfs "
1616 "'set_ftrace_notrace' entry\n");
1617
1618 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1619 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1620 NULL,
1621 &ftrace_graph_fops);
1622 if (!entry)
1623 pr_warning("Could not create debugfs "
1624 "'set_graph_function' entry\n");
1625 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1626
1627 return 0;
1628 }
1629
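/* Record every mcount call site in [start, end) and patch them to NOPs. */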
1630 static int ftrace_convert_nops(struct module *mod,
1631 unsigned long *start,
1632 unsigned long *end)
1633 {
1634 unsigned long *p;
1635 unsigned long addr;
1636 unsigned long flags;
1637
1638 mutex_lock(&ftrace_start_lock);
1639 p = start;
1640 while (p < end) {
1641 addr = ftrace_call_adjust(*p++);
1642 /*
1643 * Some architecture linkers will pad between
1644 * the different mcount_loc sections of different
1645 * object files to satisfy alignments.
1646 * Skip any NULL pointers.
1647 */
1648 if (!addr)
1649 continue;
1650 ftrace_record_ip(addr);
1651 }
1652
1653 /* disable interrupts to prevent kstop_machine */
1654 local_irq_save(flags);
1655 ftrace_update_code(mod);
1656 local_irq_restore(flags);
1657 mutex_unlock(&ftrace_start_lock);
1658
1659 return 0;
1660 }
1661
1662 void ftrace_init_module(struct module *mod,
1663 unsigned long *start, unsigned long *end)
1664 {
1665 if (ftrace_disabled || start == end)
1666 return;
1667 ftrace_convert_nops(mod, start, end);
1668 }
1669
1670 extern unsigned long __start_mcount_loc[];
1671 extern unsigned long __stop_mcount_loc[];
1672
1673 void __init ftrace_init(void)
1674 {
1675 unsigned long count, addr, flags;
1676 int ret;
1677
1678 /* Keep the ftrace pointer to the stub */
1679 addr = (unsigned long)ftrace_stub;
1680
1681 local_irq_save(flags);
1682 ftrace_dyn_arch_init(&addr);
1683 local_irq_restore(flags);
1684
1685 /* ftrace_dyn_arch_init places the return code in addr */
1686 if (addr)
1687 goto failed;
1688
1689 count = __stop_mcount_loc - __start_mcount_loc;
1690
1691 ret = ftrace_dyn_table_alloc(count);
1692 if (ret)
1693 goto failed;
1694
1695 last_ftrace_enabled = ftrace_enabled = 1;
1696
1697 ret = ftrace_convert_nops(NULL,
1698 __start_mcount_loc,
1699 __stop_mcount_loc);
1700
1701 return;
1702 failed:
1703 ftrace_disabled = 1;
1704 }
1705
1706 #else
1707
1708 static int __init ftrace_nodyn_init(void)
1709 {
1710 ftrace_enabled = 1;
1711 return 0;
1712 }
1713 device_initcall(ftrace_nodyn_init);
1714
1715 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1716 static inline void ftrace_startup_enable(int command) { }
1717 /* Keep as macros so we do not need to define the commands */
1718 # define ftrace_startup(command) do { } while (0)
1719 # define ftrace_shutdown(command) do { } while (0)
1720 # define ftrace_startup_sysctl() do { } while (0)
1721 # define ftrace_shutdown_sysctl() do { } while (0)
1722 #endif /* CONFIG_DYNAMIC_FTRACE */
1723
1724 static ssize_t
1725 ftrace_pid_read(struct file *file, char __user *ubuf,
1726 size_t cnt, loff_t *ppos)
1727 {
1728 char buf[64];
1729 int r;
1730
1731 if (ftrace_pid_trace == ftrace_swapper_pid)
1732 r = sprintf(buf, "swapper tasks\n");
1733 else if (ftrace_pid_trace)
1734 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1735 else
1736 r = sprintf(buf, "no pid\n");
1737
1738 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1739 }
1740
1741 static void clear_ftrace_swapper(void)
1742 {
1743 struct task_struct *p;
1744 int cpu;
1745
1746 get_online_cpus();
1747 for_each_online_cpu(cpu) {
1748 p = idle_task(cpu);
1749 clear_tsk_trace_trace(p);
1750 }
1751 put_online_cpus();
1752 }
1753
1754 static void set_ftrace_swapper(void)
1755 {
1756 struct task_struct *p;
1757 int cpu;
1758
1759 get_online_cpus();
1760 for_each_online_cpu(cpu) {
1761 p = idle_task(cpu);
1762 set_tsk_trace_trace(p);
1763 }
1764 put_online_cpus();
1765 }
1766
1767 static void clear_ftrace_pid(struct pid *pid)
1768 {
1769 struct task_struct *p;
1770
1771 rcu_read_lock();
1772 do_each_pid_task(pid, PIDTYPE_PID, p) {
1773 clear_tsk_trace_trace(p);
1774 } while_each_pid_task(pid, PIDTYPE_PID, p);
1775 rcu_read_unlock();
1776
1777 put_pid(pid);
1778 }
1779
1780 static void set_ftrace_pid(struct pid *pid)
1781 {
1782 struct task_struct *p;
1783
1784 rcu_read_lock();
1785 do_each_pid_task(pid, PIDTYPE_PID, p) {
1786 set_tsk_trace_trace(p);
1787 } while_each_pid_task(pid, PIDTYPE_PID, p);
1788 rcu_read_unlock();
1789 }
1790
1791 static void clear_ftrace_pid_task(struct pid **pid)
1792 {
1793 if (*pid == ftrace_swapper_pid)
1794 clear_ftrace_swapper();
1795 else
1796 clear_ftrace_pid(*pid);
1797
1798 *pid = NULL;
1799 }
1800
1801 static void set_ftrace_pid_task(struct pid *pid)
1802 {
1803 if (pid == ftrace_swapper_pid)
1804 set_ftrace_swapper();
1805 else
1806 set_ftrace_pid(pid);
1807 }
1808
1809 static ssize_t
1810 ftrace_pid_write(struct file *filp, const char __user *ubuf,
1811 size_t cnt, loff_t *ppos)
1812 {
1813 struct pid *pid;
1814 char buf[64];
1815 long val;
1816 int ret;
1817
1818 if (cnt >= sizeof(buf))
1819 return -EINVAL;
1820
1821 if (copy_from_user(&buf, ubuf, cnt))
1822 return -EFAULT;
1823
1824 buf[cnt] = 0;
1825
1826 ret = strict_strtol(buf, 10, &val);
1827 if (ret < 0)
1828 return ret;
1829
1830 mutex_lock(&ftrace_start_lock);
1831 if (val < 0) {
1832 /* disable pid tracing */
1833 if (!ftrace_pid_trace)
1834 goto out;
1835
1836 clear_ftrace_pid_task(&ftrace_pid_trace);
1837
1838 } else {
1839 /* swapper task is special */
1840 if (!val) {
1841 pid = ftrace_swapper_pid;
1842 if (pid == ftrace_pid_trace)
1843 goto out;
1844 } else {
1845 pid = find_get_pid(val);
1846
1847 if (pid == ftrace_pid_trace) {
1848 put_pid(pid);
1849 goto out;
1850 }
1851 }
1852
1853 if (ftrace_pid_trace)
1854 clear_ftrace_pid_task(&ftrace_pid_trace);
1855
1856 if (!pid)
1857 goto out;
1858
1859 ftrace_pid_trace = pid;
1860
1861 set_ftrace_pid_task(ftrace_pid_trace);
1862 }
1863
1864 /* update the function call */
1865 ftrace_update_pid_func();
1866 ftrace_startup_enable(0);
1867
1868 out:
1869 mutex_unlock(&ftrace_start_lock);
1870
1871 return cnt;
1872 }
1873
1874 static struct file_operations ftrace_pid_fops = {
1875 .read = ftrace_pid_read,
1876 .write = ftrace_pid_write,
1877 };
1878
1879 static __init int ftrace_init_debugfs(void)
1880 {
1881 struct dentry *d_tracer;
1882 struct dentry *entry;
1883
1884 d_tracer = tracing_init_dentry();
1885 if (!d_tracer)
1886 return 0;
1887
1888 ftrace_init_dyn_debugfs(d_tracer);
1889
1890 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1891 NULL, &ftrace_pid_fops);
1892 if (!entry)
1893 pr_warning("Could not create debugfs "
1894 "'set_ftrace_pid' entry\n");
1895 return 0;
1896 }
1897
1898 fs_initcall(ftrace_init_debugfs);
1899
1900 /**
1901 * ftrace_kill - kill ftrace
1902 *
1903 * This function should be used by panic code. It stops ftrace
1904 * but in a not-so-nice way: nothing is cleaned up, it simply
1905 * disables everything, so it is safe to call from atomic context.
1906 */
1907 void ftrace_kill(void)
1908 {
1909 ftrace_disabled = 1;
1910 ftrace_enabled = 0;
1911 clear_ftrace_function();
1912 }
1913
1914 /**
1915 * register_ftrace_function - register a function for profiling
1916 * @ops - ops structure that holds the function for profiling.
1917 *
1918 * Register a function to be called by all functions in the
1919 * kernel.
1920 *
1921 * Note: @ops->func and all the functions it calls must be labeled
1922 * with "notrace", otherwise it will go into a
1923 * recursive loop.
1924 */
1925 int register_ftrace_function(struct ftrace_ops *ops)
1926 {
1927 int ret;
1928
1929 if (unlikely(ftrace_disabled))
1930 return -1;
1931
1932 mutex_lock(&ftrace_sysctl_lock);
1933
1934 ret = __register_ftrace_function(ops);
1935 ftrace_startup(0);
1936
1937 mutex_unlock(&ftrace_sysctl_lock);
1938 return ret;
1939 }
1940
1941 /**
1942 * unregister_ftrace_function - unregister a function for profiling.
1943 * @ops - ops structure that holds the function to unregister
1944 *
1945 * Unregister a function that was added to be called by ftrace profiling.
1946 */
1947 int unregister_ftrace_function(struct ftrace_ops *ops)
1948 {
1949 int ret;
1950
1951 mutex_lock(&ftrace_sysctl_lock);
1952 ret = __unregister_ftrace_function(ops);
1953 ftrace_shutdown(0);
1954 mutex_unlock(&ftrace_sysctl_lock);
1955
1956 return ret;
1957 }
1958
1959 int
1960 ftrace_enable_sysctl(struct ctl_table *table, int write,
1961 struct file *file, void __user *buffer, size_t *lenp,
1962 loff_t *ppos)
1963 {
1964 int ret;
1965
1966 if (unlikely(ftrace_disabled))
1967 return -ENODEV;
1968
1969 mutex_lock(&ftrace_sysctl_lock);
1970
1971 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1972
1973 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1974 goto out;
1975
1976 last_ftrace_enabled = ftrace_enabled;
1977
1978 if (ftrace_enabled) {
1979
1980 ftrace_startup_sysctl();
1981
1982 /* we are starting ftrace again */
1983 if (ftrace_list != &ftrace_list_end) {
1984 if (ftrace_list->next == &ftrace_list_end)
1985 ftrace_trace_function = ftrace_list->func;
1986 else
1987 ftrace_trace_function = ftrace_list_func;
1988 }
1989
1990 } else {
1991 /* stopping ftrace calls (just send to ftrace_stub) */
1992 ftrace_trace_function = ftrace_stub;
1993
1994 ftrace_shutdown_sysctl();
1995 }
1996
1997 out:
1998 mutex_unlock(&ftrace_sysctl_lock);
1999 return ret;
2000 }
2001
2002 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2003
2004 static atomic_t ftrace_graph_active;
2005 static struct notifier_block ftrace_suspend_notifier;
2006
2007 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2008 {
2009 return 0;
2010 }
2011
2012 /* The callbacks that hook a function */
2013 trace_func_graph_ret_t ftrace_graph_return =
2014 (trace_func_graph_ret_t)ftrace_stub;
2015 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2016
2017 /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
2018 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2019 {
2020 int i;
2021 int ret = 0;
2022 unsigned long flags;
2023 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2024 struct task_struct *g, *t;
2025
2026 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2027 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2028 * sizeof(struct ftrace_ret_stack),
2029 GFP_KERNEL);
2030 if (!ret_stack_list[i]) {
2031 start = 0;
2032 end = i;
2033 ret = -ENOMEM;
2034 goto free;
2035 }
2036 }
2037
2038 read_lock_irqsave(&tasklist_lock, flags);
2039 do_each_thread(g, t) {
2040 if (start == end) {
2041 ret = -EAGAIN;
2042 goto unlock;
2043 }
2044
2045 if (t->ret_stack == NULL) {
2046 t->curr_ret_stack = -1;
2047 /* Make sure IRQs see the -1 first: */
2048 barrier();
2049 t->ret_stack = ret_stack_list[start++];
2050 atomic_set(&t->tracing_graph_pause, 0);
2051 atomic_set(&t->trace_overrun, 0);
2052 }
2053 } while_each_thread(g, t);
2054
2055 unlock:
2056 read_unlock_irqrestore(&tasklist_lock, flags);
2057 free:
2058 for (i = start; i < end; i++)
2059 kfree(ret_stack_list[i]);
2060 return ret;
2061 }
2062
2063 /* Allocate a return stack for each task */
2064 static int start_graph_tracing(void)
2065 {
2066 struct ftrace_ret_stack **ret_stack_list;
2067 int ret, cpu;
2068
2069 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2070 sizeof(struct ftrace_ret_stack *),
2071 GFP_KERNEL);
2072
2073 if (!ret_stack_list)
2074 return -ENOMEM;
2075
2076 /* The ret_stack of each cpu's idle (boot) task will never be freed */
2077 for_each_online_cpu(cpu)
2078 ftrace_graph_init_task(idle_task(cpu));
2079
2080 do {
2081 ret = alloc_retstack_tasklist(ret_stack_list);
2082 } while (ret == -EAGAIN);
2083
2084 kfree(ret_stack_list);
2085 return ret;
2086 }
2087
2088 /*
2089 * Hibernation protection.
2090 * The state of the current task is too unstable during
2091 * suspend/restore to disk. We want to protect against that.
2092 */
2093 static int
2094 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2095 void *unused)
2096 {
2097 switch (state) {
2098 case PM_HIBERNATION_PREPARE:
2099 pause_graph_tracing();
2100 break;
2101
2102 case PM_POST_HIBERNATION:
2103 unpause_graph_tracing();
2104 break;
2105 }
2106 return NOTIFY_DONE;
2107 }
2108
2109 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2110 trace_func_graph_ent_t entryfunc)
2111 {
2112 int ret = 0;
2113
2114 mutex_lock(&ftrace_sysctl_lock);
2115
2116 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2117 register_pm_notifier(&ftrace_suspend_notifier);
2118
2119 atomic_inc(&ftrace_graph_active);
2120 ret = start_graph_tracing();
2121 if (ret) {
2122 atomic_dec(&ftrace_graph_active);
2123 goto out;
2124 }
2125
2126 ftrace_graph_return = retfunc;
2127 ftrace_graph_entry = entryfunc;
2128
2129 ftrace_startup(FTRACE_START_FUNC_RET);
2130
2131 out:
2132 mutex_unlock(&ftrace_sysctl_lock);
2133 return ret;
2134 }
2135
2136 void unregister_ftrace_graph(void)
2137 {
2138 mutex_lock(&ftrace_sysctl_lock);
2139
2140 atomic_dec(&ftrace_graph_active);
2141 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2142 ftrace_graph_entry = ftrace_graph_entry_stub;
2143 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2144 unregister_pm_notifier(&ftrace_suspend_notifier);
2145
2146 mutex_unlock(&ftrace_sysctl_lock);
2147 }
2148
2149 /* Allocate a return stack for a newly created task */
2150 void ftrace_graph_init_task(struct task_struct *t)
2151 {
2152 if (atomic_read(&ftrace_graph_active)) {
2153 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2154 * sizeof(struct ftrace_ret_stack),
2155 GFP_KERNEL);
2156 if (!t->ret_stack)
2157 return;
2158 t->curr_ret_stack = -1;
2159 atomic_set(&t->tracing_graph_pause, 0);
2160 atomic_set(&t->trace_overrun, 0);
2161 } else
2162 t->ret_stack = NULL;
2163 }
2164
2165 void ftrace_graph_exit_task(struct task_struct *t)
2166 {
2167 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2168
2169 t->ret_stack = NULL;
2170 /* NULL must become visible to IRQs before we free it: */
2171 barrier();
2172
2173 kfree(ret_stack);
2174 }
2175
2176 void ftrace_graph_stop(void)
2177 {
2178 ftrace_stop();
2179 }
2180 #endif
2181