1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond) \
44 ({ \
45 int ___r = cond; \
46 if (WARN_ON(___r)) \
47 ftrace_kill(); \
48 ___r; \
49 })
50
51 #define FTRACE_WARN_ON_ONCE(cond) \
52 ({ \
53 int ___r = cond; \
54 if (WARN_ON_ONCE(___r)) \
55 ftrace_kill(); \
56 ___r; \
57 })
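/*
 * Both macros above are GNU statement expressions that evaluate to the
 * tested condition itself, so they can sit directly inside a
 * conditional. An illustrative (not from this file) use:
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;
 *
 * When the condition is true, a warning is printed (every time, or
 * once) and ftrace_kill() permanently disables ftrace, but the caller
 * still sees the condition's truth value.
 */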
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
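/*
 * For scale: 7 bits gives the 128-bucket ftrace_func_hash used for
 * function probes below, while the filter/notrace hashes start at
 * 1 << 10 == 1024 buckets and are capped at 1 << 12 == 4096.
 */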
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname) \
69 .func_hash = &opsname.local_hash, \
70 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 .func_hash = val, \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 .func = ftrace_stub,
81 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 INIT_OPS_HASH(ftrace_list_end)
83 };
84
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids);
96 struct ftrace_pid {
97 struct list_head list;
98 struct pid *pid;
99 };
100
101 /*
102 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled.
104 */
105 static int ftrace_disabled __read_mostly;
106
107 static DEFINE_MUTEX(ftrace_lock);
108
109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113 static struct ftrace_ops global_ops;
114 static struct ftrace_ops control_ops;
115
116 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
117 struct ftrace_ops *op, struct pt_regs *regs);
118
119 #if ARCH_SUPPORTS_FTRACE_OPS
120 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
121 struct ftrace_ops *op, struct pt_regs *regs);
122 #else
123 /* See comment below, where ftrace_ops_list_func is defined */
124 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
125 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
126 #endif
127
128 /*
129 * Traverse the given ftrace_ops list, invoking all entries. The reason we
130 * can use rcu_dereference_raw_notrace() is that elements removed from this list
131 * are simply leaked, so there is no need to interact with a grace-period
132 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
133 * concurrent insertions into the list.
134 *
135 * Silly Alpha and silly pointer-speculation compiler optimizations!
136 */
137 #define do_for_each_ftrace_op(op, list) \
138 op = rcu_dereference_raw_notrace(list); \
139 do
140
141 /*
142 * Optimized for just a single item in the list (as that is the normal case).
143 */
144 #define while_for_each_ftrace_op(op) \
145 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
146 unlikely((op) != &ftrace_list_end))
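/*
 * Illustrative pairing of the two macros above, matching how the rest
 * of this file walks the ops list:
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->trampoline)
 *			goto found;
 *	} while_for_each_ftrace_op(op);
 *
 * Note the body also runs for the first entry before the termination
 * check, which is why the list is terminated by the ftrace_list_end
 * stub rather than by NULL.
 */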
147
148 static inline void ftrace_ops_init(struct ftrace_ops *ops)
149 {
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
152 mutex_init(&ops->local_hash.regex_lock);
153 ops->func_hash = &ops->local_hash;
154 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
155 }
156 #endif
157 }
158
159 /**
160 * ftrace_nr_registered_ops - return number of ops registered
161 *
162 * Returns the number of ftrace_ops registered and tracing functions
163 */
164 int ftrace_nr_registered_ops(void)
165 {
166 struct ftrace_ops *ops;
167 int cnt = 0;
168
169 mutex_lock(&ftrace_lock);
170
171 for (ops = ftrace_ops_list;
172 ops != &ftrace_list_end; ops = ops->next)
173 cnt++;
174
175 mutex_unlock(&ftrace_lock);
176
177 return cnt;
178 }
179
180 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
181 struct ftrace_ops *op, struct pt_regs *regs)
182 {
183 if (!test_tsk_trace_trace(current))
184 return;
185
186 ftrace_pid_function(ip, parent_ip, op, regs);
187 }
188
189 static void set_ftrace_pid_function(ftrace_func_t func)
190 {
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194 }
195
196 /**
197 * clear_ftrace_function - reset the ftrace function
198 *
199 * This NULLs the ftrace function and in essence stops
200 * tracing. There may be lag
201 */
202 void clear_ftrace_function(void)
203 {
204 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206 }
207
208 static void control_ops_disable_all(struct ftrace_ops *ops)
209 {
210 int cpu;
211
212 for_each_possible_cpu(cpu)
213 *per_cpu_ptr(ops->disabled, cpu) = 1;
214 }
215
216 static int control_ops_alloc(struct ftrace_ops *ops)
217 {
218 int __percpu *disabled;
219
220 disabled = alloc_percpu(int);
221 if (!disabled)
222 return -ENOMEM;
223
224 ops->disabled = disabled;
225 control_ops_disable_all(ops);
226 return 0;
227 }
228
229 static void ftrace_sync(struct work_struct *work)
230 {
231 /*
232 * This function is just a stub to implement a hard force
233 * of synchronize_sched(). This requires synchronizing
234 * tasks even in userspace and idle.
235 *
236 * Yes, function tracing is rude.
237 */
238 }
239
240 static void ftrace_sync_ipi(void *data)
241 {
242 /* Probably not needed, but do it anyway */
243 smp_rmb();
244 }
245
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
248 #else
249 static inline void update_function_graph_func(void) { }
250 #endif
251
252 static void update_ftrace_function(void)
253 {
254 ftrace_func_t func;
255
256 /*
257 * Prepare the ftrace_ops that the arch callback will use.
258 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 * will point to the ops we want.
260 */
261 set_function_trace_op = ftrace_ops_list;
262
263 /* If there's no ftrace_ops registered, just call the stub function */
264 if (ftrace_ops_list == &ftrace_list_end) {
265 func = ftrace_stub;
266
267 /*
268 * If we are at the end of the list and this ops is
269 * recursion safe and not dynamic and the arch supports passing ops,
270 * then have the mcount trampoline call the function directly.
271 */
272 } else if (ftrace_ops_list->next == &ftrace_list_end) {
273 func = ftrace_ops_get_func(ftrace_ops_list);
274
275 } else {
276 /* Just use the default ftrace_ops */
277 set_function_trace_op = &ftrace_list_end;
278 func = ftrace_ops_list_func;
279 }
280
281 update_function_graph_func();
282
283 /* If there's no change, then do nothing more here */
284 if (ftrace_trace_function == func)
285 return;
286
287 /*
288 * If we are using the list function, it doesn't care
289 * about the function_trace_ops.
290 */
291 if (func == ftrace_ops_list_func) {
292 ftrace_trace_function = func;
293 /*
294 * Don't even bother setting function_trace_ops,
295 * it would be racy to do so anyway.
296 */
297 return;
298 }
299
300 #ifndef CONFIG_DYNAMIC_FTRACE
301 /*
302 * For static tracing, we need to be a bit more careful.
303 * The function change takes effect immediately. Thus,
304 * we need to coordinate the setting of the function_trace_ops
305 * with the setting of the ftrace_trace_function.
306 *
307 * Set the function to the list ops, which will call the
308 * function we want, albeit indirectly, but it handles the
309 * ftrace_ops and doesn't depend on function_trace_op.
310 */
311 ftrace_trace_function = ftrace_ops_list_func;
312 /*
313 * Make sure all CPUs see this. Yes this is slow, but static
314 * tracing is slow and nasty to have enabled.
315 */
316 schedule_on_each_cpu(ftrace_sync);
317 /* Now all cpus are using the list ops. */
318 function_trace_op = set_function_trace_op;
319 /* Make sure the function_trace_op is visible on all CPUs */
320 smp_wmb();
321 /* Nasty way to force a rmb on all cpus */
322 smp_call_function(ftrace_sync_ipi, NULL, 1);
323 /* OK, we are all set to update the ftrace_trace_function now! */
324 #endif /* !CONFIG_DYNAMIC_FTRACE */
325
326 ftrace_trace_function = func;
327 }
328
329 int using_ftrace_ops_list_func(void)
330 {
331 return ftrace_trace_function == ftrace_ops_list_func;
332 }
333
334 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
335 {
336 ops->next = *list;
337 /*
338 * We are entering ops into the list but another
339 * CPU might be walking that list. We need to make sure
340 * the ops->next pointer is valid before another CPU sees
341 * the ops pointer included into the list.
342 */
343 rcu_assign_pointer(*list, ops);
344 }
345
346 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
347 {
348 struct ftrace_ops **p;
349
350 /*
351 * If we are removing the last function, then simply point
352 * to the ftrace_stub.
353 */
354 if (*list == ops && ops->next == &ftrace_list_end) {
355 *list = &ftrace_list_end;
356 return 0;
357 }
358
359 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
360 if (*p == ops)
361 break;
362
363 if (*p != ops)
364 return -1;
365
366 *p = (*p)->next;
367 return 0;
368 }
369
370 static void add_ftrace_list_ops(struct ftrace_ops **list,
371 struct ftrace_ops *main_ops,
372 struct ftrace_ops *ops)
373 {
374 int first = *list == &ftrace_list_end;
375 add_ftrace_ops(list, ops);
376 if (first)
377 add_ftrace_ops(&ftrace_ops_list, main_ops);
378 }
379
380 static int remove_ftrace_list_ops(struct ftrace_ops **list,
381 struct ftrace_ops *main_ops,
382 struct ftrace_ops *ops)
383 {
384 int ret = remove_ftrace_ops(list, ops);
385 if (!ret && *list == &ftrace_list_end)
386 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
387 return ret;
388 }
389
390 static int __register_ftrace_function(struct ftrace_ops *ops)
391 {
392 if (ops->flags & FTRACE_OPS_FL_DELETED)
393 return -EINVAL;
394
395 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
396 return -EBUSY;
397
398 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
399 /*
400 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
401 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
402 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
403 */
404 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
405 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
406 return -EINVAL;
407
408 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
409 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
410 #endif
411
412 if (!core_kernel_data((unsigned long)ops))
413 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
414
415 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
416 if (control_ops_alloc(ops))
417 return -ENOMEM;
418 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
419 } else
420 add_ftrace_ops(&ftrace_ops_list, ops);
421
422 if (ftrace_enabled)
423 update_ftrace_function();
424
425 return 0;
426 }
427
428 static int __unregister_ftrace_function(struct ftrace_ops *ops)
429 {
430 int ret;
431
432 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
433 return -EBUSY;
434
435 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
436 ret = remove_ftrace_list_ops(&ftrace_control_list,
437 &control_ops, ops);
438 } else
439 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
440
441 if (ret < 0)
442 return ret;
443
444 if (ftrace_enabled)
445 update_ftrace_function();
446
447 return 0;
448 }
449
450 static void ftrace_update_pid_func(void)
451 {
452 /* Only do something if we are tracing something */
453 if (ftrace_trace_function == ftrace_stub)
454 return;
455
456 update_ftrace_function();
457 }
458
459 #ifdef CONFIG_FUNCTION_PROFILER
460 struct ftrace_profile {
461 struct hlist_node node;
462 unsigned long ip;
463 unsigned long counter;
464 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
465 unsigned long long time;
466 unsigned long long time_squared;
467 #endif
468 };
469
470 struct ftrace_profile_page {
471 struct ftrace_profile_page *next;
472 unsigned long index;
473 struct ftrace_profile records[];
474 };
475
476 struct ftrace_profile_stat {
477 atomic_t disabled;
478 struct hlist_head *hash;
479 struct ftrace_profile_page *pages;
480 struct ftrace_profile_page *start;
481 struct tracer_stat stat;
482 };
483
484 #define PROFILE_RECORDS_SIZE \
485 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
486
487 #define PROFILES_PER_PAGE \
488 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
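/*
 * Rough sizing (illustrative, 64-bit with CONFIG_FUNCTION_GRAPH_TRACER):
 * a record is 48 bytes and the page header 16 bytes, so a 4K page
 * holds (4096 - 16) / 48 = 85 records.
 */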
489
490 static int ftrace_profile_enabled __read_mostly;
491
492 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
493 static DEFINE_MUTEX(ftrace_profile_lock);
494
495 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
496
497 #define FTRACE_PROFILE_HASH_BITS 10
498 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
499
500 static void *
501 function_stat_next(void *v, int idx)
502 {
503 struct ftrace_profile *rec = v;
504 struct ftrace_profile_page *pg;
505
506 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
507
508 again:
509 if (idx != 0)
510 rec++;
511
512 if ((void *)rec >= (void *)&pg->records[pg->index]) {
513 pg = pg->next;
514 if (!pg)
515 return NULL;
516 rec = &pg->records[0];
517 if (!rec->counter)
518 goto again;
519 }
520
521 return rec;
522 }
523
524 static void *function_stat_start(struct tracer_stat *trace)
525 {
526 struct ftrace_profile_stat *stat =
527 container_of(trace, struct ftrace_profile_stat, stat);
528
529 if (!stat || !stat->start)
530 return NULL;
531
532 return function_stat_next(&stat->start->records[0], 0);
533 }
534
535 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
536 /* function graph compares on total time */
537 static int function_stat_cmp(void *p1, void *p2)
538 {
539 struct ftrace_profile *a = p1;
540 struct ftrace_profile *b = p2;
541
542 if (a->time < b->time)
543 return -1;
544 if (a->time > b->time)
545 return 1;
546 else
547 return 0;
548 }
549 #else
550 /* without function graph, compare against hits */
551 static int function_stat_cmp(void *p1, void *p2)
552 {
553 struct ftrace_profile *a = p1;
554 struct ftrace_profile *b = p2;
555
556 if (a->counter < b->counter)
557 return -1;
558 if (a->counter > b->counter)
559 return 1;
560 else
561 return 0;
562 }
563 #endif
564
565 static int function_stat_headers(struct seq_file *m)
566 {
567 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
568 seq_printf(m, "  Function                               "
569 "Hit    Time            Avg             s^2\n"
570 "  --------                               "
571 "---    ----            ---             ---\n");
572 #else
573 seq_printf(m, "  Function                               Hit\n"
574 "  --------                               ---\n");
575 #endif
576 return 0;
577 }
578
579 static int function_stat_show(struct seq_file *m, void *v)
580 {
581 struct ftrace_profile *rec = v;
582 char str[KSYM_SYMBOL_LEN];
583 int ret = 0;
584 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
585 static struct trace_seq s;
586 unsigned long long avg;
587 unsigned long long stddev;
588 #endif
589 mutex_lock(&ftrace_profile_lock);
590
591 /* we raced with function_profile_reset() */
592 if (unlikely(rec->counter == 0)) {
593 ret = -EBUSY;
594 goto out;
595 }
596
597 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
598 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
599
600 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
601 seq_printf(m, " ");
602 avg = rec->time;
603 do_div(avg, rec->counter);
604
605 /* Sample variance (s^2) */
606 if (rec->counter <= 1)
607 stddev = 0;
608 else {
609 /*
610 * Use the computational formula for the sample variance:
611 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
612 */
613 stddev = rec->counter * rec->time_squared -
614 rec->time * rec->time;
615
616 /*
617 * Divide by only 1000 of the full ns^2 -> us^2 conversion;
618 * trace_print_graph_duration() will divide by 1000 again.
619 */
620 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
621 }
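	/*
	 * Worked example (ignoring the ns^2 -> us^2 scaling above):
	 * three samples of 1, 2 and 3 ns give counter = 3, time = 6
	 * and time_squared = 14, so stddev = (3 * 14 - 6 * 6) / (3 * 2)
	 * = 1, the sample variance of those durations.
	 */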
622
623 trace_seq_init(&s);
624 trace_print_graph_duration(rec->time, &s);
625 trace_seq_puts(&s, " ");
626 trace_print_graph_duration(avg, &s);
627 trace_seq_puts(&s, " ");
628 trace_print_graph_duration(stddev, &s);
629 trace_print_seq(m, &s);
630 #endif
631 seq_putc(m, '\n');
632 out:
633 mutex_unlock(&ftrace_profile_lock);
634
635 return ret;
636 }
637
638 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
639 {
640 struct ftrace_profile_page *pg;
641
642 pg = stat->pages = stat->start;
643
644 while (pg) {
645 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
646 pg->index = 0;
647 pg = pg->next;
648 }
649
650 memset(stat->hash, 0,
651 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
652 }
653
654 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
655 {
656 struct ftrace_profile_page *pg;
657 int functions;
658 int pages;
659 int i;
660
661 /* If we already allocated, do nothing */
662 if (stat->pages)
663 return 0;
664
665 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
666 if (!stat->pages)
667 return -ENOMEM;
668
669 #ifdef CONFIG_DYNAMIC_FTRACE
670 functions = ftrace_update_tot_cnt;
671 #else
672 /*
673 * We do not know the number of functions that exist because
674 * dynamic tracing is what counts them. From past experience
675 * we see around 20K functions. That should be more than enough.
676 * It is highly unlikely we will execute every function in
677 * the kernel.
678 */
679 functions = 20000;
680 #endif
681
682 pg = stat->start = stat->pages;
683
684 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
685
686 for (i = 1; i < pages; i++) {
687 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
688 if (!pg->next)
689 goto out_free;
690 pg = pg->next;
691 }
692
693 return 0;
694
695 out_free:
696 pg = stat->start;
697 while (pg) {
698 unsigned long tmp = (unsigned long)pg;
699
700 pg = pg->next;
701 free_page(tmp);
702 }
703
704 stat->pages = NULL;
705 stat->start = NULL;
706
707 return -ENOMEM;
708 }
709
710 static int ftrace_profile_init_cpu(int cpu)
711 {
712 struct ftrace_profile_stat *stat;
713 int size;
714
715 stat = &per_cpu(ftrace_profile_stats, cpu);
716
717 if (stat->hash) {
718 /* If the profile is already created, simply reset it */
719 ftrace_profile_reset(stat);
720 return 0;
721 }
722
723 /*
724 * We are profiling all functions, but usually only a few thousand
725 * functions are hit. We'll make a hash of 1024 items.
726 */
727 size = FTRACE_PROFILE_HASH_SIZE;
728
729 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
730
731 if (!stat->hash)
732 return -ENOMEM;
733
734 /* Preallocate the function profiling pages */
735 if (ftrace_profile_pages_init(stat) < 0) {
736 kfree(stat->hash);
737 stat->hash = NULL;
738 return -ENOMEM;
739 }
740
741 return 0;
742 }
743
744 static int ftrace_profile_init(void)
745 {
746 int cpu;
747 int ret = 0;
748
749 for_each_possible_cpu(cpu) {
750 ret = ftrace_profile_init_cpu(cpu);
751 if (ret)
752 break;
753 }
754
755 return ret;
756 }
757
758 /* interrupts must be disabled */
759 static struct ftrace_profile *
760 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
761 {
762 struct ftrace_profile *rec;
763 struct hlist_head *hhd;
764 unsigned long key;
765
766 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
767 hhd = &stat->hash[key];
768
769 if (hlist_empty(hhd))
770 return NULL;
771
772 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
773 if (rec->ip == ip)
774 return rec;
775 }
776
777 return NULL;
778 }
779
780 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
781 struct ftrace_profile *rec)
782 {
783 unsigned long key;
784
785 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
786 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
787 }
788
789 /*
790 * The memory is already allocated; this simply finds a new record to use.
791 */
792 static struct ftrace_profile *
793 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
794 {
795 struct ftrace_profile *rec = NULL;
796
797 /* prevent recursion (from NMIs) */
798 if (atomic_inc_return(&stat->disabled) != 1)
799 goto out;
800
801 /*
802 * Try to find the function again since an NMI
803 * could have added it
804 */
805 rec = ftrace_find_profiled_func(stat, ip);
806 if (rec)
807 goto out;
808
809 if (stat->pages->index == PROFILES_PER_PAGE) {
810 if (!stat->pages->next)
811 goto out;
812 stat->pages = stat->pages->next;
813 }
814
815 rec = &stat->pages->records[stat->pages->index++];
816 rec->ip = ip;
817 ftrace_add_profile(stat, rec);
818
819 out:
820 atomic_dec(&stat->disabled);
821
822 return rec;
823 }
824
825 static void
826 function_profile_call(unsigned long ip, unsigned long parent_ip,
827 struct ftrace_ops *ops, struct pt_regs *regs)
828 {
829 struct ftrace_profile_stat *stat;
830 struct ftrace_profile *rec;
831 unsigned long flags;
832
833 if (!ftrace_profile_enabled)
834 return;
835
836 local_irq_save(flags);
837
838 stat = this_cpu_ptr(&ftrace_profile_stats);
839 if (!stat->hash || !ftrace_profile_enabled)
840 goto out;
841
842 rec = ftrace_find_profiled_func(stat, ip);
843 if (!rec) {
844 rec = ftrace_profile_alloc(stat, ip);
845 if (!rec)
846 goto out;
847 }
848
849 rec->counter++;
850 out:
851 local_irq_restore(flags);
852 }
853
854 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
855 static int profile_graph_entry(struct ftrace_graph_ent *trace)
856 {
857 function_profile_call(trace->func, 0, NULL, NULL);
858 return 1;
859 }
860
861 static void profile_graph_return(struct ftrace_graph_ret *trace)
862 {
863 struct ftrace_profile_stat *stat;
864 unsigned long long calltime;
865 struct ftrace_profile *rec;
866 unsigned long flags;
867
868 local_irq_save(flags);
869 stat = this_cpu_ptr(&ftrace_profile_stats);
870 if (!stat->hash || !ftrace_profile_enabled)
871 goto out;
872
873 /* If the calltime was zeroed, ignore it */
874 if (!trace->calltime)
875 goto out;
876
877 calltime = trace->rettime - trace->calltime;
878
879 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
880 int index;
881
882 index = trace->depth;
883
884 /* Append this call time to the parent time to subtract */
885 if (index)
886 current->ret_stack[index - 1].subtime += calltime;
887
888 if (current->ret_stack[index].subtime < calltime)
889 calltime -= current->ret_stack[index].subtime;
890 else
891 calltime = 0;
892 }
893
894 rec = ftrace_find_profiled_func(stat, trace->func);
895 if (rec) {
896 rec->time += calltime;
897 rec->time_squared += calltime * calltime;
898 }
899
900 out:
901 local_irq_restore(flags);
902 }
903
904 static int register_ftrace_profiler(void)
905 {
906 return register_ftrace_graph(&profile_graph_return,
907 &profile_graph_entry);
908 }
909
910 static void unregister_ftrace_profiler(void)
911 {
912 unregister_ftrace_graph();
913 }
914 #else
915 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
916 .func = function_profile_call,
917 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
918 INIT_OPS_HASH(ftrace_profile_ops)
919 };
920
921 static int register_ftrace_profiler(void)
922 {
923 return register_ftrace_function(&ftrace_profile_ops);
924 }
925
926 static void unregister_ftrace_profiler(void)
927 {
928 unregister_ftrace_function(&ftrace_profile_ops);
929 }
930 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
931
932 static ssize_t
933 ftrace_profile_write(struct file *filp, const char __user *ubuf,
934 size_t cnt, loff_t *ppos)
935 {
936 unsigned long val;
937 int ret;
938
939 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
940 if (ret)
941 return ret;
942
943 val = !!val;
944
945 mutex_lock(&ftrace_profile_lock);
946 if (ftrace_profile_enabled ^ val) {
947 if (val) {
948 ret = ftrace_profile_init();
949 if (ret < 0) {
950 cnt = ret;
951 goto out;
952 }
953
954 ret = register_ftrace_profiler();
955 if (ret < 0) {
956 cnt = ret;
957 goto out;
958 }
959 ftrace_profile_enabled = 1;
960 } else {
961 ftrace_profile_enabled = 0;
962 /*
963 * unregister_ftrace_profiler() calls stop_machine(),
964 * so this acts like a synchronize_sched().
965 */
966 unregister_ftrace_profiler();
967 }
968 }
969 out:
970 mutex_unlock(&ftrace_profile_lock);
971
972 *ppos += cnt;
973
974 return cnt;
975 }
976
977 static ssize_t
978 ftrace_profile_read(struct file *filp, char __user *ubuf,
979 size_t cnt, loff_t *ppos)
980 {
981 char buf[64]; /* big enough to hold a number */
982 int r;
983
984 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
986 }
987
988 static const struct file_operations ftrace_profile_fops = {
989 .open = tracing_open_generic,
990 .read = ftrace_profile_read,
991 .write = ftrace_profile_write,
992 .llseek = default_llseek,
993 };
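/*
 * This file is created below in ftrace_profile_debugfs() as
 * "function_profile_enabled" in the tracing directory. Typical use:
 * write 1 to it to start profiling, then read the per-cpu results
 * from trace_stat/function<N> (registered via register_stat_tracer()).
 */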
994
995 /* used to initialize the real stat files */
996 static struct tracer_stat function_stats __initdata = {
997 .name = "functions",
998 .stat_start = function_stat_start,
999 .stat_next = function_stat_next,
1000 .stat_cmp = function_stat_cmp,
1001 .stat_headers = function_stat_headers,
1002 .stat_show = function_stat_show
1003 };
1004
1005 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1006 {
1007 struct ftrace_profile_stat *stat;
1008 struct dentry *entry;
1009 char *name;
1010 int ret;
1011 int cpu;
1012
1013 for_each_possible_cpu(cpu) {
1014 stat = &per_cpu(ftrace_profile_stats, cpu);
1015
1016 /* allocate enough for function name + cpu number */
1017 name = kmalloc(32, GFP_KERNEL);
1018 if (!name) {
1019 /*
1020 * The files created are permanent; even if something goes
1021 * wrong later we do not free this memory.
1022 */
1023 WARN(1,
1024 "Could not allocate stat file for cpu %d\n",
1025 cpu);
1026 return;
1027 }
1028 stat->stat = function_stats;
1029 snprintf(name, 32, "function%d", cpu);
1030 stat->stat.name = name;
1031 ret = register_stat_tracer(&stat->stat);
1032 if (ret) {
1033 WARN(1,
1034 "Could not register function stat for cpu %d\n",
1035 cpu);
1036 kfree(name);
1037 return;
1038 }
1039 }
1040
1041 entry = debugfs_create_file("function_profile_enabled", 0644,
1042 d_tracer, NULL, &ftrace_profile_fops);
1043 if (!entry)
1044 pr_warning("Could not create debugfs "
1045 "'function_profile_enabled' entry\n");
1046 }
1047
1048 #else /* CONFIG_FUNCTION_PROFILER */
1049 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1050 {
1051 }
1052 #endif /* CONFIG_FUNCTION_PROFILER */
1053
1054 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1055
1056 #ifdef CONFIG_DYNAMIC_FTRACE
1057
1058 static struct ftrace_ops *removed_ops;
1059
1060 /*
1061 * Set when doing a global update, like enabling all recs or disabling them.
1062 * It is not set when just updating a single ftrace_ops.
1063 */
1064 static bool update_all_ops;
1065
1066 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1067 # error Dynamic ftrace depends on MCOUNT_RECORD
1068 #endif
1069
1070 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1071
1072 struct ftrace_func_probe {
1073 struct hlist_node node;
1074 struct ftrace_probe_ops *ops;
1075 unsigned long flags;
1076 unsigned long ip;
1077 void *data;
1078 struct list_head free_list;
1079 };
1080
1081 struct ftrace_func_entry {
1082 struct hlist_node hlist;
1083 unsigned long ip;
1084 };
1085
1086 struct ftrace_hash {
1087 unsigned long size_bits;
1088 struct hlist_head *buckets;
1089 unsigned long count;
1090 struct rcu_head rcu;
1091 };
1092
1093 /*
1094 * We make these constant because no one should touch them,
1095 * but they are used as the default "empty hash", to avoid allocating
1096 * it all the time. These are in a read only section such that if
1097 * anyone does try to modify it, it will cause an exception.
1098 */
1099 static const struct hlist_head empty_buckets[1];
1100 static const struct ftrace_hash empty_hash = {
1101 .buckets = (struct hlist_head *)empty_buckets,
1102 };
1103 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1104
1105 static struct ftrace_ops global_ops = {
1106 .func = ftrace_stub,
1107 .local_hash.notrace_hash = EMPTY_HASH,
1108 .local_hash.filter_hash = EMPTY_HASH,
1109 INIT_OPS_HASH(global_ops)
1110 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1111 FTRACE_OPS_FL_INITIALIZED,
1112 };
1113
1114 struct ftrace_page {
1115 struct ftrace_page *next;
1116 struct dyn_ftrace *records;
1117 int index;
1118 int size;
1119 };
1120
1121 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1122 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
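/*
 * Illustrative: on x86-64, where struct dyn_ftrace is two unsigned
 * longs (dyn_arch_ftrace is empty), ENTRY_SIZE is 16 bytes and a 4K
 * page holds 256 records; arches with extra fields hold fewer.
 */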
1123
1124 /* estimate from running different kernels */
1125 #define NR_TO_INIT 10000
1126
1127 static struct ftrace_page *ftrace_pages_start;
1128 static struct ftrace_page *ftrace_pages;
1129
1130 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
1131 {
1132 return !hash || !hash->count;
1133 }
1134
1135 static struct ftrace_func_entry *
1136 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1137 {
1138 unsigned long key;
1139 struct ftrace_func_entry *entry;
1140 struct hlist_head *hhd;
1141
1142 if (ftrace_hash_empty(hash))
1143 return NULL;
1144
1145 if (hash->size_bits > 0)
1146 key = hash_long(ip, hash->size_bits);
1147 else
1148 key = 0;
1149
1150 hhd = &hash->buckets[key];
1151
1152 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1153 if (entry->ip == ip)
1154 return entry;
1155 }
1156 return NULL;
1157 }
1158
1159 static void __add_hash_entry(struct ftrace_hash *hash,
1160 struct ftrace_func_entry *entry)
1161 {
1162 struct hlist_head *hhd;
1163 unsigned long key;
1164
1165 if (hash->size_bits)
1166 key = hash_long(entry->ip, hash->size_bits);
1167 else
1168 key = 0;
1169
1170 hhd = &hash->buckets[key];
1171 hlist_add_head(&entry->hlist, hhd);
1172 hash->count++;
1173 }
1174
1175 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1176 {
1177 struct ftrace_func_entry *entry;
1178
1179 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1180 if (!entry)
1181 return -ENOMEM;
1182
1183 entry->ip = ip;
1184 __add_hash_entry(hash, entry);
1185
1186 return 0;
1187 }
1188
1189 static void
1190 free_hash_entry(struct ftrace_hash *hash,
1191 struct ftrace_func_entry *entry)
1192 {
1193 hlist_del(&entry->hlist);
1194 kfree(entry);
1195 hash->count--;
1196 }
1197
1198 static void
1199 remove_hash_entry(struct ftrace_hash *hash,
1200 struct ftrace_func_entry *entry)
1201 {
1202 hlist_del(&entry->hlist);
1203 hash->count--;
1204 }
1205
1206 static void ftrace_hash_clear(struct ftrace_hash *hash)
1207 {
1208 struct hlist_head *hhd;
1209 struct hlist_node *tn;
1210 struct ftrace_func_entry *entry;
1211 int size = 1 << hash->size_bits;
1212 int i;
1213
1214 if (!hash->count)
1215 return;
1216
1217 for (i = 0; i < size; i++) {
1218 hhd = &hash->buckets[i];
1219 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1220 free_hash_entry(hash, entry);
1221 }
1222 FTRACE_WARN_ON(hash->count);
1223 }
1224
1225 static void free_ftrace_hash(struct ftrace_hash *hash)
1226 {
1227 if (!hash || hash == EMPTY_HASH)
1228 return;
1229 ftrace_hash_clear(hash);
1230 kfree(hash->buckets);
1231 kfree(hash);
1232 }
1233
1234 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1235 {
1236 struct ftrace_hash *hash;
1237
1238 hash = container_of(rcu, struct ftrace_hash, rcu);
1239 free_ftrace_hash(hash);
1240 }
1241
1242 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1243 {
1244 if (!hash || hash == EMPTY_HASH)
1245 return;
1246 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1247 }
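/*
 * Readers walk these hashes with only preemption disabled (see the
 * comment above ftrace_ops_test() below); deferring the free through
 * call_rcu_sched() is what makes that lockless read side safe.
 */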
1248
1249 void ftrace_free_filter(struct ftrace_ops *ops)
1250 {
1251 ftrace_ops_init(ops);
1252 free_ftrace_hash(ops->func_hash->filter_hash);
1253 free_ftrace_hash(ops->func_hash->notrace_hash);
1254 }
1255
1256 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1257 {
1258 struct ftrace_hash *hash;
1259 int size;
1260
1261 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1262 if (!hash)
1263 return NULL;
1264
1265 size = 1 << size_bits;
1266 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1267
1268 if (!hash->buckets) {
1269 kfree(hash);
1270 return NULL;
1271 }
1272
1273 hash->size_bits = size_bits;
1274
1275 return hash;
1276 }
1277
1278 static struct ftrace_hash *
1279 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1280 {
1281 struct ftrace_func_entry *entry;
1282 struct ftrace_hash *new_hash;
1283 int size;
1284 int ret;
1285 int i;
1286
1287 new_hash = alloc_ftrace_hash(size_bits);
1288 if (!new_hash)
1289 return NULL;
1290
1291 /* Empty hash? */
1292 if (ftrace_hash_empty(hash))
1293 return new_hash;
1294
1295 size = 1 << hash->size_bits;
1296 for (i = 0; i < size; i++) {
1297 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1298 ret = add_hash_entry(new_hash, entry->ip);
1299 if (ret < 0)
1300 goto free_hash;
1301 }
1302 }
1303
1304 FTRACE_WARN_ON(new_hash->count != hash->count);
1305
1306 return new_hash;
1307
1308 free_hash:
1309 free_ftrace_hash(new_hash);
1310 return NULL;
1311 }
1312
1313 static void
1314 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1315 static void
1316 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1317
1318 static int
1319 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1320 struct ftrace_hash **dst, struct ftrace_hash *src)
1321 {
1322 struct ftrace_func_entry *entry;
1323 struct hlist_node *tn;
1324 struct hlist_head *hhd;
1325 struct ftrace_hash *new_hash;
1326 int size = src->count;
1327 int bits = 0;
1328 int i;
1329
1330 /*
1331 * If the new source is empty, just free dst and assign it
1332 * the empty_hash.
1333 */
1334 if (!src->count) {
1335 new_hash = EMPTY_HASH;
1336 goto update;
1337 }
1338
1339 /*
1340 * Make the hash size about 1/2 the # found
1341 */
1342 for (size /= 2; size; size >>= 1)
1343 bits++;
1344
1345 /* Don't allocate too much */
1346 if (bits > FTRACE_HASH_MAX_BITS)
1347 bits = FTRACE_HASH_MAX_BITS;
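	/*
	 * Example: src->count == 100 makes size/2 == 50, the loop runs
	 * for 50, 25, 12, 6, 3, 1, so bits == 6 and the new hash gets
	 * 1 << 6 == 64 buckets, roughly half the entry count.
	 */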
1348
1349 new_hash = alloc_ftrace_hash(bits);
1350 if (!new_hash)
1351 return -ENOMEM;
1352
1353 size = 1 << src->size_bits;
1354 for (i = 0; i < size; i++) {
1355 hhd = &src->buckets[i];
1356 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1357 remove_hash_entry(src, entry);
1358 __add_hash_entry(new_hash, entry);
1359 }
1360 }
1361
1362 update:
1363 /*
1364 * Remove the current set, update the hash and add
1365 * them back.
1366 */
1367 ftrace_hash_rec_disable_modify(ops, enable);
1368
1369 rcu_assign_pointer(*dst, new_hash);
1370
1371 ftrace_hash_rec_enable_modify(ops, enable);
1372
1373 return 0;
1374 }
1375
1376 static bool hash_contains_ip(unsigned long ip,
1377 struct ftrace_ops_hash *hash)
1378 {
1379 /*
1380 * The function record is a match if it exists in the filter
1381 * hash and not in the notrace hash. Note, an empty hash is
1382 * considered a match for the filter hash, but an empty
1383 * notrace hash is considered not in the notrace hash.
1384 */
1385 return (ftrace_hash_empty(hash->filter_hash) ||
1386 ftrace_lookup_ip(hash->filter_hash, ip)) &&
1387 (ftrace_hash_empty(hash->notrace_hash) ||
1388 !ftrace_lookup_ip(hash->notrace_hash, ip));
1389 }
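/*
 * Example: with both hashes empty every ip matches; adding foo() to
 * the notrace hash makes everything but foo() match; additionally
 * putting bar() in the filter hash then leaves bar() as the only
 * match.
 */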
1390
1391 /*
1392 * Test the hashes for this ops to see if we want to call
1393 * the ops->func or not.
1394 *
1395 * It's a match if the ip is in the ops->filter_hash or
1396 * the filter_hash does not exist or is empty,
1397 * AND
1398 * the ip is not in the ops->notrace_hash.
1399 *
1400 * This needs to be called with preemption disabled as
1401 * the hashes are freed with call_rcu_sched().
1402 */
1403 static int
1404 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1405 {
1406 struct ftrace_ops_hash hash;
1407 int ret;
1408
1409 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1410 /*
1411 * There's a small race when adding ops: the ftrace handler
1412 * that wants regs may be called without them. We cannot
1413 * allow that handler to be called if regs is NULL.
1414 */
1415 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1416 return 0;
1417 #endif
1418
1419 hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1420 hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1421
1422 if (hash_contains_ip(ip, &hash))
1423 ret = 1;
1424 else
1425 ret = 0;
1426
1427 return ret;
1428 }
1429
1430 /*
1431 * This is a double for loop. Do not use 'break' to break out of the
1432 * loop; you must use a goto.
1433 */
1434 #define do_for_each_ftrace_rec(pg, rec) \
1435 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1436 int _____i; \
1437 for (_____i = 0; _____i < pg->index; _____i++) { \
1438 rec = &pg->records[_____i];
1439
1440 #define while_for_each_ftrace_rec() \
1441 } \
1442 }
1443
1444
1445 static int ftrace_cmp_recs(const void *a, const void *b)
1446 {
1447 const struct dyn_ftrace *key = a;
1448 const struct dyn_ftrace *rec = b;
1449
1450 if (key->flags < rec->ip)
1451 return -1;
1452 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1453 return 1;
1454 return 0;
1455 }
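/*
 * The search key overloads ->flags to hold the end of the range (see
 * ftrace_location_range() below), so this comparator reports a match
 * whenever [key->ip, key->flags] overlaps
 * [rec->ip, rec->ip + MCOUNT_INSN_SIZE).
 */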
1456
1457 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1458 {
1459 struct ftrace_page *pg;
1460 struct dyn_ftrace *rec;
1461 struct dyn_ftrace key;
1462
1463 key.ip = start;
1464 key.flags = end; /* overload flags, as it is unsigned long */
1465
1466 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1467 if (end < pg->records[0].ip ||
1468 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1469 continue;
1470 rec = bsearch(&key, pg->records, pg->index,
1471 sizeof(struct dyn_ftrace),
1472 ftrace_cmp_recs);
1473 if (rec)
1474 return rec->ip;
1475 }
1476
1477 return 0;
1478 }
1479
1480 /**
1481 * ftrace_location - return true if the ip given is a traced location
1482 * @ip: the instruction pointer to check
1483 *
1484 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1485 * That is, the instruction that is either a NOP or call to
1486 * the function tracer. It checks the ftrace internal tables to
1487 * determine if the address belongs or not.
1488 */
1489 unsigned long ftrace_location(unsigned long ip)
1490 {
1491 return ftrace_location_range(ip, ip);
1492 }
1493
1494 /**
1495 * ftrace_text_reserved - return true if range contains an ftrace location
1496 * @start: start of range to search
1497 * @end: end of range to search (inclusive). @end points to the last byte to check.
1498 *
1499 * Returns 1 if @start and @end contains a ftrace location.
1500 * That is, the instruction that is either a NOP or call to
1501 * the function tracer. It checks the ftrace internal tables to
1502 * determine if the address belongs or not.
1503 */
1504 int ftrace_text_reserved(const void *start, const void *end)
1505 {
1506 unsigned long ret;
1507
1508 ret = ftrace_location_range((unsigned long)start,
1509 (unsigned long)end);
1510
1511 return (int)!!ret;
1512 }
1513
1514 /* Test if ops registered to this rec needs regs */
1515 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1516 {
1517 struct ftrace_ops *ops;
1518 bool keep_regs = false;
1519
1520 for (ops = ftrace_ops_list;
1521 ops != &ftrace_list_end; ops = ops->next) {
1522 /* pass rec in as regs to have non-NULL val */
1523 if (ftrace_ops_test(ops, rec->ip, rec)) {
1524 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1525 keep_regs = true;
1526 break;
1527 }
1528 }
1529 }
1530
1531 return keep_regs;
1532 }
1533
1534 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1535 int filter_hash,
1536 bool inc)
1537 {
1538 struct ftrace_hash *hash;
1539 struct ftrace_hash *other_hash;
1540 struct ftrace_page *pg;
1541 struct dyn_ftrace *rec;
1542 int count = 0;
1543 int all = 0;
1544
1545 /* Only update if the ops has been registered */
1546 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1547 return;
1548
1549 /*
1550 * In the filter_hash case:
1551 * If the count is zero, we update all records.
1552 * Otherwise we just update the items in the hash.
1553 *
1554 * In the notrace_hash case:
1555 * We enable the update in the hash.
1556 * As disabling notrace means enabling the tracing,
1557 * and enabling notrace means disabling, the inc variable
1558 * gets inverted.
1559 */
1560 if (filter_hash) {
1561 hash = ops->func_hash->filter_hash;
1562 other_hash = ops->func_hash->notrace_hash;
1563 if (ftrace_hash_empty(hash))
1564 all = 1;
1565 } else {
1566 inc = !inc;
1567 hash = ops->func_hash->notrace_hash;
1568 other_hash = ops->func_hash->filter_hash;
1569 /*
1570 * If the notrace hash has no items,
1571 * then there's nothing to do.
1572 */
1573 if (ftrace_hash_empty(hash))
1574 return;
1575 }
1576
1577 do_for_each_ftrace_rec(pg, rec) {
1578 int in_other_hash = 0;
1579 int in_hash = 0;
1580 int match = 0;
1581
1582 if (all) {
1583 /*
1584 * Only the filter_hash affects all records.
1585 * Update if the record is not in the notrace hash.
1586 */
1587 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1588 match = 1;
1589 } else {
1590 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1591 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1592
1593 /*
1594 * If filter_hash is set, we want to match all functions
1595 * that are in the hash but not in the other hash.
1596 *
1597 * If filter_hash is not set, then we are decrementing.
1598 * That means we match anything that is in the hash
1599 * and also in the other_hash. That is, we need to turn
1600 * off functions in the other hash because they are disabled
1601 * by this hash.
1602 */
1603 if (filter_hash && in_hash && !in_other_hash)
1604 match = 1;
1605 else if (!filter_hash && in_hash &&
1606 (in_other_hash || ftrace_hash_empty(other_hash)))
1607 match = 1;
1608 }
1609 if (!match)
1610 continue;
1611
1612 if (inc) {
1613 rec->flags++;
1614 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1615 return;
1616
1617 /*
1618 * If there's only a single callback registered to a
1619 * function, and the ops has a trampoline registered
1620 * for it, then we can call it directly.
1621 */
1622 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1623 rec->flags |= FTRACE_FL_TRAMP;
1624 else
1625 /*
1626 * If we are adding another function callback
1627 * to this function, and the previous had a
1628 * custom trampoline in use, then we need to go
1629 * back to the default trampoline.
1630 */
1631 rec->flags &= ~FTRACE_FL_TRAMP;
1632
1633 /*
1634 * If any ops wants regs saved for this function
1635 * then all ops will get saved regs.
1636 */
1637 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1638 rec->flags |= FTRACE_FL_REGS;
1639 } else {
1640 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1641 return;
1642 rec->flags--;
1643
1644 /*
1645 * If the rec had REGS enabled and the ops that is
1646 * being removed had REGS set, then see if there is
1647 * still any ops for this record that wants regs.
1648 * If not, we can stop recording them.
1649 */
1650 if (ftrace_rec_count(rec) > 0 &&
1651 rec->flags & FTRACE_FL_REGS &&
1652 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1653 if (!test_rec_ops_needs_regs(rec))
1654 rec->flags &= ~FTRACE_FL_REGS;
1655 }
1656
1657 /*
1658 * If the rec had TRAMP enabled, then it needs to
1659 * be cleared. As TRAMP can only be enabled if
1660 * there is only a single ops attached to it.
1661 * In other words, always disable it on decrementing.
1662 * In the future, we may set it if rec count is
1663 * decremented to one, and the ops that is left
1664 * has a trampoline.
1665 */
1666 rec->flags &= ~FTRACE_FL_TRAMP;
1667
1668 /*
1669 * flags will be cleared in ftrace_check_record()
1670 * if rec count is zero.
1671 */
1672 }
1673 count++;
1674 /* Shortcut, if we handled all records, we are done. */
1675 if (!all && count == hash->count)
1676 return;
1677 } while_for_each_ftrace_rec();
1678 }
1679
1680 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1681 int filter_hash)
1682 {
1683 __ftrace_hash_rec_update(ops, filter_hash, 0);
1684 }
1685
1686 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1687 int filter_hash)
1688 {
1689 __ftrace_hash_rec_update(ops, filter_hash, 1);
1690 }
1691
1692 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1693 int filter_hash, int inc)
1694 {
1695 struct ftrace_ops *op;
1696
1697 __ftrace_hash_rec_update(ops, filter_hash, inc);
1698
1699 if (ops->func_hash != &global_ops.local_hash)
1700 return;
1701
1702 /*
1703 * If the ops shares the global_ops hash, then we need to update
1704 * all ops that are enabled and use this hash.
1705 */
1706 do_for_each_ftrace_op(op, ftrace_ops_list) {
1707 /* Already done */
1708 if (op == ops)
1709 continue;
1710 if (op->func_hash == &global_ops.local_hash)
1711 __ftrace_hash_rec_update(op, filter_hash, inc);
1712 } while_for_each_ftrace_op(op);
1713 }
1714
1715 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1716 int filter_hash)
1717 {
1718 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1719 }
1720
1721 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1722 int filter_hash)
1723 {
1724 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1725 }
1726
1727 static void print_ip_ins(const char *fmt, unsigned char *p)
1728 {
1729 int i;
1730
1731 printk(KERN_CONT "%s", fmt);
1732
1733 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1734 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1735 }
1736
1737 /**
1738 * ftrace_bug - report and shutdown function tracer
1739 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1740 * @ip: The address that failed
1741 *
1742 * The arch code that enables or disables the function tracing
1743 * can call ftrace_bug() when it has detected a problem in
1744 * modifying the code. @failed should be one of:
1745 * EFAULT - if the problem happens on reading the @ip address
1746 * EINVAL - if what is read at @ip is not what was expected
1747 * EPERM - if the problem happens on writing to the @ip address
1748 */
1749 void ftrace_bug(int failed, unsigned long ip)
1750 {
1751 switch (failed) {
1752 case -EFAULT:
1753 FTRACE_WARN_ON_ONCE(1);
1754 pr_info("ftrace faulted on modifying ");
1755 print_ip_sym(ip);
1756 break;
1757 case -EINVAL:
1758 FTRACE_WARN_ON_ONCE(1);
1759 pr_info("ftrace failed to modify ");
1760 print_ip_sym(ip);
1761 print_ip_ins(" actual: ", (unsigned char *)ip);
1762 printk(KERN_CONT "\n");
1763 break;
1764 case -EPERM:
1765 FTRACE_WARN_ON_ONCE(1);
1766 pr_info("ftrace faulted on writing ");
1767 print_ip_sym(ip);
1768 break;
1769 default:
1770 FTRACE_WARN_ON_ONCE(1);
1771 pr_info("ftrace faulted on unknown error ");
1772 print_ip_sym(ip);
1773 }
1774 }
1775
1776 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1777 {
1778 unsigned long flag = 0UL;
1779
1780 /*
1781 * If we are updating calls:
1782 *
1783 * If the record has a ref count, then we need to enable it
1784 * because someone is using it.
1785 *
1786 * Otherwise we make sure its disabled.
1787 *
1788 * If we are disabling calls, then disable all records that
1789 * are enabled.
1790 */
1791 if (enable && ftrace_rec_count(rec))
1792 flag = FTRACE_FL_ENABLED;
1793
1794 /*
1795 * If enabling and the REGS flag does not match the REGS_EN, or
1796 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1797 * this record. Set flags to fail the compare against ENABLED.
1798 */
1799 if (flag) {
1800 if (!(rec->flags & FTRACE_FL_REGS) !=
1801 !(rec->flags & FTRACE_FL_REGS_EN))
1802 flag |= FTRACE_FL_REGS;
1803
1804 if (!(rec->flags & FTRACE_FL_TRAMP) !=
1805 !(rec->flags & FTRACE_FL_TRAMP_EN))
1806 flag |= FTRACE_FL_TRAMP;
1807 }
1808
1809 /* If the state of this record hasn't changed, then do nothing */
1810 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1811 return FTRACE_UPDATE_IGNORE;
1812
1813 if (flag) {
1814 /* Save off if rec is being enabled (for return value) */
1815 flag ^= rec->flags & FTRACE_FL_ENABLED;
1816
1817 if (update) {
1818 rec->flags |= FTRACE_FL_ENABLED;
1819 if (flag & FTRACE_FL_REGS) {
1820 if (rec->flags & FTRACE_FL_REGS)
1821 rec->flags |= FTRACE_FL_REGS_EN;
1822 else
1823 rec->flags &= ~FTRACE_FL_REGS_EN;
1824 }
1825 if (flag & FTRACE_FL_TRAMP) {
1826 if (rec->flags & FTRACE_FL_TRAMP)
1827 rec->flags |= FTRACE_FL_TRAMP_EN;
1828 else
1829 rec->flags &= ~FTRACE_FL_TRAMP_EN;
1830 }
1831 }
1832
1833 /*
1834 * If this record is being updated from a nop, then
1835 * return UPDATE_MAKE_CALL.
1836 * Otherwise,
1837 * return UPDATE_MODIFY_CALL to tell the caller to convert
1838 * from the save-regs to a non-save-regs function, or
1839 * vice versa, or from a trampoline call.
1840 */
1841 if (flag & FTRACE_FL_ENABLED)
1842 return FTRACE_UPDATE_MAKE_CALL;
1843
1844 return FTRACE_UPDATE_MODIFY_CALL;
1845 }
1846
1847 if (update) {
1848 /* If there's no more users, clear all flags */
1849 if (!ftrace_rec_count(rec))
1850 rec->flags = 0;
1851 else
1852 /* Just disable the record (keep REGS state) */
1853 rec->flags &= ~FTRACE_FL_ENABLED;
1854 }
1855
1856 return FTRACE_UPDATE_MAKE_NOP;
1857 }
1858
1859 /**
1860 * ftrace_update_record - set a record that now is tracing or not
1861 * @rec: the record to update
1862 * @enable: set to 1 if the record is tracing, zero to force disable
1863 *
1864 * The records that represent all functions that can be traced need
1865 * to be updated when tracing has been enabled.
1866 */
1867 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1868 {
1869 return ftrace_check_record(rec, enable, 1);
1870 }
1871
1872 /**
1873 * ftrace_test_record - check if the record has been enabled or not
1874 * @rec: the record to test
1875 * @enable: set to 1 to check if enabled, 0 if it is disabled
1876 *
1877 * The arch code may need to test if a record is already set to
1878 * tracing to determine how to modify the function code that it
1879 * represents.
1880 */
1881 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1882 {
1883 return ftrace_check_record(rec, enable, 0);
1884 }
1885
1886 static struct ftrace_ops *
1887 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
1888 {
1889 struct ftrace_ops *op;
1890 unsigned long ip = rec->ip;
1891
1892 do_for_each_ftrace_op(op, ftrace_ops_list) {
1893
1894 if (!op->trampoline)
1895 continue;
1896
1897 if (hash_contains_ip(ip, op->func_hash))
1898 return op;
1899 } while_for_each_ftrace_op(op);
1900
1901 return NULL;
1902 }
1903
1904 static struct ftrace_ops *
1905 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1906 {
1907 struct ftrace_ops *op;
1908 unsigned long ip = rec->ip;
1909
1910 /*
1911 * Need to check removed ops first.
1912 * If they are being removed, and this rec has a tramp,
1913 * and this rec is in the ops list, then it would be the
1914 * one with the tramp.
1915 */
1916 if (removed_ops) {
1917 if (hash_contains_ip(ip, &removed_ops->old_hash))
1918 return removed_ops;
1919 }
1920
1921 /*
1922 * Need to find the current trampoline for a rec.
1923 * Now, a trampoline is only attached to a rec if there
1924 * was a single 'ops' attached to it. But this can be called
1925 * when we are adding another op to the rec or removing the
1926 * current one. Thus, if the op is being added, we can
1927 * ignore it because it hasn't attached itself to the rec
1928 * yet.
1929 *
1930 * If an ops is being modified (hooking to different functions)
1931 * then we don't care about the new functions that are being
1932 * added, just the old ones (that are probably being removed).
1933 *
1934 * If we are adding an ops to a function that already is using
1935 * a trampoline, it needs to be removed (trampolines are only
1936 * for single ops connected), then an ops that is not being
1937 * modified also needs to be checked.
1938 */
1939 do_for_each_ftrace_op(op, ftrace_ops_list) {
1940
1941 if (!op->trampoline)
1942 continue;
1943
1944 /*
1945 * If the ops is being added, it hasn't gotten to
1946 * the point to be removed from this tree yet.
1947 */
1948 if (op->flags & FTRACE_OPS_FL_ADDING)
1949 continue;
1950
1951
1952 /*
1953 * If the ops is being modified and is in the old
1954 * hash, then it is probably being removed from this
1955 * function.
1956 */
1957 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
1958 hash_contains_ip(ip, &op->old_hash))
1959 return op;
1960 /*
1961 * If the ops is not being added or modified, and it's
1962 * in its normal filter hash, then this must be the one
1963 * we want!
1964 */
1965 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
1966 hash_contains_ip(ip, op->func_hash))
1967 return op;
1968
1969 } while_for_each_ftrace_op(op);
1970
1971 return NULL;
1972 }
1973
1974 static struct ftrace_ops *
1975 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
1976 {
1977 struct ftrace_ops *op;
1978 unsigned long ip = rec->ip;
1979
1980 do_for_each_ftrace_op(op, ftrace_ops_list) {
1981 /* pass rec in as regs to have non-NULL val */
1982 if (hash_contains_ip(ip, op->func_hash))
1983 return op;
1984 } while_for_each_ftrace_op(op);
1985
1986 return NULL;
1987 }
1988
1989 /**
1990 * ftrace_get_addr_new - Get the call address to set to
1991 * @rec: The ftrace record descriptor
1992 *
1993 * If the record has the FTRACE_FL_REGS set, that means that it
1994 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
1995 * is not set, then it wants to convert to the normal callback.
1996 *
1997 * Returns the address of the trampoline to set to
1998 */
1999 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2000 {
2001 struct ftrace_ops *ops;
2002
2003 /* Trampolines take precedence over regs */
2004 if (rec->flags & FTRACE_FL_TRAMP) {
2005 ops = ftrace_find_tramp_ops_new(rec);
2006 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2007 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2008 (void *)rec->ip, (void *)rec->ip, rec->flags);
2009 /* Ftrace is shutting down, return anything */
2010 return (unsigned long)FTRACE_ADDR;
2011 }
2012 return ops->trampoline;
2013 }
2014
2015 if (rec->flags & FTRACE_FL_REGS)
2016 return (unsigned long)FTRACE_REGS_ADDR;
2017 else
2018 return (unsigned long)FTRACE_ADDR;
2019 }
2020
2021 /**
2022 * ftrace_get_addr_curr - Get the call address that is already there
2023 * @rec: The ftrace record descriptor
2024 *
2025 * The FTRACE_FL_REGS_EN is set when the record already points to
2026 * a function that saves all the regs. Basically the '_EN' version
2027 * represents the current state of the function.
2028 *
2029 * Returns the address of the trampoline that is currently being called
2030 */
2031 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2032 {
2033 struct ftrace_ops *ops;
2034
2035 /* Trampolines take precedence over regs */
2036 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2037 ops = ftrace_find_tramp_ops_curr(rec);
2038 if (FTRACE_WARN_ON(!ops)) {
2039 pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2040 (void *)rec->ip, (void *)rec->ip);
2041 /* Ftrace is shutting down, return anything */
2042 return (unsigned long)FTRACE_ADDR;
2043 }
2044 return ops->trampoline;
2045 }
2046
2047 if (rec->flags & FTRACE_FL_REGS_EN)
2048 return (unsigned long)FTRACE_REGS_ADDR;
2049 else
2050 return (unsigned long)FTRACE_ADDR;
2051 }
2052
2053 static int
2054 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2055 {
2056 unsigned long ftrace_old_addr;
2057 unsigned long ftrace_addr;
2058 int ret;
2059
2060 ftrace_addr = ftrace_get_addr_new(rec);
2061
2062 /* This needs to be done before we call ftrace_update_record */
2063 ftrace_old_addr = ftrace_get_addr_curr(rec);
2064
2065 ret = ftrace_update_record(rec, enable);
2066
2067 switch (ret) {
2068 case FTRACE_UPDATE_IGNORE:
2069 return 0;
2070
2071 case FTRACE_UPDATE_MAKE_CALL:
2072 return ftrace_make_call(rec, ftrace_addr);
2073
2074 case FTRACE_UPDATE_MAKE_NOP:
2075 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2076
2077 case FTRACE_UPDATE_MODIFY_CALL:
2078 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2079 }
2080
2081 return -1; /* unknown ftrace bug */
2082 }
2083
2084 void __weak ftrace_replace_code(int enable)
2085 {
2086 struct dyn_ftrace *rec;
2087 struct ftrace_page *pg;
2088 int failed;
2089
2090 if (unlikely(ftrace_disabled))
2091 return;
2092
2093 do_for_each_ftrace_rec(pg, rec) {
2094 failed = __ftrace_replace_code(rec, enable);
2095 if (failed) {
2096 ftrace_bug(failed, rec->ip);
2097 /* Stop processing */
2098 return;
2099 }
2100 } while_for_each_ftrace_rec();
2101 }
2102
2103 struct ftrace_rec_iter {
2104 struct ftrace_page *pg;
2105 int index;
2106 };
2107
2108 /**
2109 * ftrace_rec_iter_start - start up iterating over traced functions
2110 *
2111 * Returns an iterator handle that is used to iterate over all
2112 * the records that represent address locations where functions
2113 * are traced.
2114 *
2115 * May return NULL if no records are available.
2116 */
2117 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2118 {
2119 /*
2120 * We only use a single iterator.
2121 * Protected by the ftrace_lock mutex.
2122 */
2123 static struct ftrace_rec_iter ftrace_rec_iter;
2124 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2125
2126 iter->pg = ftrace_pages_start;
2127 iter->index = 0;
2128
2129 /* Could have empty pages */
2130 while (iter->pg && !iter->pg->index)
2131 iter->pg = iter->pg->next;
2132
2133 if (!iter->pg)
2134 return NULL;
2135
2136 return iter;
2137 }
2138
2139 /**
2140 * ftrace_rec_iter_next - get the next record to process.
2141 * @iter: The handle to the iterator.
2142 *
2143 * Returns the next iterator after the given iterator @iter.
2144 */
2145 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2146 {
2147 iter->index++;
2148
2149 if (iter->index >= iter->pg->index) {
2150 iter->pg = iter->pg->next;
2151 iter->index = 0;
2152
2153 /* Could have empty pages */
2154 while (iter->pg && !iter->pg->index)
2155 iter->pg = iter->pg->next;
2156 }
2157
2158 if (!iter->pg)
2159 return NULL;
2160
2161 return iter;
2162 }
2163
2164 /**
2165 * ftrace_rec_iter_record - get the record at the iterator location
2166 * @iter: The current iterator location
2167 *
2168 * Returns the record that the current @iter is at.
2169 */
2170 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2171 {
2172 return &iter->pg->records[iter->index];
2173 }
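/*
 * A minimal sketch of how an arch might walk every traced call site with
 * the iterator API above (hypothetical example; it must run under the
 * ftrace_lock mutex, since ftrace_rec_iter_start() hands out a single
 * static iterator that is only protected by that lock):
 */
#if 0
static void example_walk_records(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		/* inspect or patch the call site at rec->ip here */
	}
}
#endif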
2174
2175 static int
2176 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2177 {
2178 unsigned long ip;
2179 int ret;
2180
2181 ip = rec->ip;
2182
2183 if (unlikely(ftrace_disabled))
2184 return 0;
2185
2186 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2187 if (ret) {
2188 ftrace_bug(ret, ip);
2189 return 0;
2190 }
2191 return 1;
2192 }
2193
2194 /*
2195 * archs can override this function if they must do something
2196 * before the modifying code is performed.
2197 */
2198 int __weak ftrace_arch_code_modify_prepare(void)
2199 {
2200 return 0;
2201 }
2202
2203 /*
2204 * archs can override this function if they must do something
2205 * after the modifying code is performed.
2206 */
2207 int __weak ftrace_arch_code_modify_post_process(void)
2208 {
2209 return 0;
2210 }
2211
2212 void ftrace_modify_all_code(int command)
2213 {
2214 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2215 int err = 0;
2216
2217 /*
2218 * If the ftrace_caller calls an ftrace_ops func directly,
2219 * we need to make sure that it only traces functions it
2220 * expects to trace. When doing the switch of functions,
2221 * we need to update to the ftrace_ops_list_func first
2222 * before the transition between old and new calls is made,
2223 * as the ftrace_ops_list_func will check the ops hashes
2224 * to make sure the ops have the right functions
2225 * traced.
2226 */
2227 if (update) {
2228 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2229 if (FTRACE_WARN_ON(err))
2230 return;
2231 }
2232
2233 if (command & FTRACE_UPDATE_CALLS)
2234 ftrace_replace_code(1);
2235 else if (command & FTRACE_DISABLE_CALLS)
2236 ftrace_replace_code(0);
2237
2238 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2239 function_trace_op = set_function_trace_op;
2240 smp_wmb();
2241 /* If irqs are disabled, we are in stop machine */
2242 if (!irqs_disabled())
2243 smp_call_function(ftrace_sync_ipi, NULL, 1);
2244 err = ftrace_update_ftrace_func(ftrace_trace_function);
2245 if (FTRACE_WARN_ON(err))
2246 return;
2247 }
2248
2249 if (command & FTRACE_START_FUNC_RET)
2250 err = ftrace_enable_ftrace_graph_caller();
2251 else if (command & FTRACE_STOP_FUNC_RET)
2252 err = ftrace_disable_ftrace_graph_caller();
2253 FTRACE_WARN_ON(err);
2254 }
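/*
 * For example, a caller doing a full update that also switches the traced
 * function and enables the function graph return hook would pass a command
 * mask like the following (a sketch; these are the flag bits tested above):
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC |
 *			       FTRACE_START_FUNC_RET);
 */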
2255
2256 static int __ftrace_modify_code(void *data)
2257 {
2258 int *command = data;
2259
2260 ftrace_modify_all_code(*command);
2261
2262 return 0;
2263 }
2264
2265 /**
2266 * ftrace_run_stop_machine - go back to the stop machine method
2267 * @command: The command to tell ftrace what to do
2268 *
2269 * If an arch needs to fall back to the stop machine method,
2270 * it can call this function.
2271 */
2272 void ftrace_run_stop_machine(int command)
2273 {
2274 stop_machine(__ftrace_modify_code, &command, NULL);
2275 }
2276
2277 /**
2278 * arch_ftrace_update_code - modify the code to trace or not trace
2279 * @command: The command that needs to be done
2280 *
2281 * Archs can override this function if they do not need to
2282 * run stop_machine() to modify code.
2283 */
2284 void __weak arch_ftrace_update_code(int command)
2285 {
2286 ftrace_run_stop_machine(command);
2287 }
2288
2289 static void ftrace_run_update_code(int command)
2290 {
2291 int ret;
2292
2293 ret = ftrace_arch_code_modify_prepare();
2294 FTRACE_WARN_ON(ret);
2295 if (ret)
2296 return;
2297
2298 /*
2299 * By default we use stop_machine() to modify the code.
2300 * But archs can do whatever they want as long as it
2301 * is safe. stop_machine() is the safest, but also
2302 * produces the most overhead.
2303 */
2304 arch_ftrace_update_code(command);
2305
2306 ret = ftrace_arch_code_modify_post_process();
2307 FTRACE_WARN_ON(ret);
2308 }
2309
2310 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2311 struct ftrace_hash *old_hash)
2312 {
2313 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2314 ops->old_hash.filter_hash = old_hash;
2315 ftrace_run_update_code(command);
2316 ops->old_hash.filter_hash = NULL;
2317 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2318 }
2319
2320 static ftrace_func_t saved_ftrace_func;
2321 static int ftrace_start_up;
2322
2323 static void control_ops_free(struct ftrace_ops *ops)
2324 {
2325 free_percpu(ops->disabled);
2326 }
2327
2328 static void ftrace_startup_enable(int command)
2329 {
2330 if (saved_ftrace_func != ftrace_trace_function) {
2331 saved_ftrace_func = ftrace_trace_function;
2332 command |= FTRACE_UPDATE_TRACE_FUNC;
2333 }
2334
2335 if (!command || !ftrace_enabled)
2336 return;
2337
2338 ftrace_run_update_code(command);
2339 }
2340
2341 static void ftrace_startup_all(int command)
2342 {
2343 update_all_ops = true;
2344 ftrace_startup_enable(command);
2345 update_all_ops = false;
2346 }
2347
2348 static int ftrace_startup(struct ftrace_ops *ops, int command)
2349 {
2350 int ret;
2351
2352 if (unlikely(ftrace_disabled))
2353 return -ENODEV;
2354
2355 ret = __register_ftrace_function(ops);
2356 if (ret)
2357 return ret;
2358
2359 ftrace_start_up++;
2360 command |= FTRACE_UPDATE_CALLS;
2361
2362 /*
2363 * Note that ftrace probes use this to start up
2364 * and modify functions it will probe. But we still
2365 * set the ADDING flag for modification, as probes
2366 * do not have trampolines. If they add them in the
2367 * future, then the probes will need to distinguish
2368 * between adding and updating probes.
2369 */
2370 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2371
2372 ftrace_hash_rec_enable(ops, 1);
2373
2374 ftrace_startup_enable(command);
2375
2376 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2377
2378 return 0;
2379 }
2380
2381 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2382 {
2383 int ret;
2384
2385 if (unlikely(ftrace_disabled))
2386 return -ENODEV;
2387
2388 ret = __unregister_ftrace_function(ops);
2389 if (ret)
2390 return ret;
2391
2392 ftrace_start_up--;
2393 /*
2394 * Just warn in case of an imbalance; no need to kill ftrace, it's not
2395 * critical, but the ftrace_call callers may never be nopped again after
2396 * further ftrace uses.
2397 */
2398 WARN_ON_ONCE(ftrace_start_up < 0);
2399
2400 ftrace_hash_rec_disable(ops, 1);
2401
2402 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2403
2404 command |= FTRACE_UPDATE_CALLS;
2405
2406 if (saved_ftrace_func != ftrace_trace_function) {
2407 saved_ftrace_func = ftrace_trace_function;
2408 command |= FTRACE_UPDATE_TRACE_FUNC;
2409 }
2410
2411 if (!command || !ftrace_enabled) {
2412 /*
2413 * If these are control ops, they still need their
2414 * per_cpu field freed. Since function tracing is
2415 * not currently active, we can just free them
2416 * without synchronizing all CPUs.
2417 */
2418 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2419 control_ops_free(ops);
2420 return 0;
2421 }
2422
2423 /*
2424 * If the ops uses a trampoline, then it needs to be
2425 * tested first on update.
2426 */
2427 ops->flags |= FTRACE_OPS_FL_REMOVING;
2428 removed_ops = ops;
2429
2430 /* The trampoline logic checks the old hashes */
2431 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2432 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2433
2434 ftrace_run_update_code(command);
2435
2436 /*
2437 * If there's no more ops registered with ftrace, run a
2438 * sanity check to make sure all rec flags are cleared.
2439 */
2440 if (ftrace_ops_list == &ftrace_list_end) {
2441 struct ftrace_page *pg;
2442 struct dyn_ftrace *rec;
2443
2444 do_for_each_ftrace_rec(pg, rec) {
2445 if (FTRACE_WARN_ON_ONCE(rec->flags))
2446 pr_warn(" %pS flags:%lx\n",
2447 (void *)rec->ip, rec->flags);
2448 } while_for_each_ftrace_rec();
2449 }
2450
2451 ops->old_hash.filter_hash = NULL;
2452 ops->old_hash.notrace_hash = NULL;
2453
2454 removed_ops = NULL;
2455 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2456
2457 /*
2458 * Dynamic ops may be freed, we must make sure that all
2459 * callers are done before leaving this function.
2460 * The same goes for freeing the per_cpu data of the control
2461 * ops.
2462 *
2463 * Again, normal synchronize_sched() is not good enough.
2464 * We need to do a hard force of sched synchronization.
2465 * This is because we use preempt_disable() to do RCU, but
2466 * the function tracers can be called where RCU is not watching
2467 * (like before user_exit()). We can not rely on the RCU
2468 * infrastructure to do the synchronization, thus we must do it
2469 * ourselves.
2470 */
2471 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2472 schedule_on_each_cpu(ftrace_sync);
2473
2474 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2475 control_ops_free(ops);
2476 }
2477
2478 return 0;
2479 }
2480
2481 static void ftrace_startup_sysctl(void)
2482 {
2483 if (unlikely(ftrace_disabled))
2484 return;
2485
2486 /* Force update next time */
2487 saved_ftrace_func = NULL;
2488 /* ftrace_start_up is true if we want ftrace running */
2489 if (ftrace_start_up)
2490 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2491 }
2492
2493 static void ftrace_shutdown_sysctl(void)
2494 {
2495 if (unlikely(ftrace_disabled))
2496 return;
2497
2498 /* ftrace_start_up is true if ftrace is running */
2499 if (ftrace_start_up)
2500 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2501 }
2502
2503 static cycle_t ftrace_update_time;
2504 unsigned long ftrace_update_tot_cnt;
2505
2506 static inline int ops_traces_mod(struct ftrace_ops *ops)
2507 {
2508 /*
2509 * An empty filter_hash defaults to tracing the whole module,
2510 * but a notrace hash requires testing individual module functions.
2511 */
2512 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2513 ftrace_hash_empty(ops->func_hash->notrace_hash);
2514 }
2515
2516 /*
2517 * Check if the current ops references the record.
2518 *
2519 * If the ops traces all functions, then it was already accounted for.
2520 * If the ops does not trace the current record function, skip it.
2521 * If the ops ignores the function via notrace filter, skip it.
2522 */
2523 static inline bool
2524 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2525 {
2526 /* If ops isn't enabled, ignore it */
2527 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2528 return 0;
2529
2530 /* If ops traces all mods, we already accounted for it */
2531 if (ops_traces_mod(ops))
2532 return 0;
2533
2534 /* The function must be in the filter */
2535 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2536 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2537 return 0;
2538
2539 /* If in notrace hash, we ignore it too */
2540 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2541 return 0;
2542
2543 return 1;
2544 }
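/*
 * An example of the hash semantics tested above (hypothetical hash
 * contents; the rules are the ones implemented in ops_references_rec()):
 *
 *	filter_hash	notrace_hash	ops_references_rec() for foo
 *	-----------	------------	----------------------------
 *	empty		empty		0 (traces all; counted earlier)
 *	{ foo }		empty		1
 *	{ bar }		empty		0 (foo not in the filter)
 *	{ foo }		{ foo }		0 (the notrace hash wins)
 */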
2545
2546 static int referenced_filters(struct dyn_ftrace *rec)
2547 {
2548 struct ftrace_ops *ops;
2549 int cnt = 0;
2550
2551 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2552 if (ops_references_rec(ops, rec))
2553 cnt++;
2554 }
2555
2556 return cnt;
2557 }
2558
2559 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2560 {
2561 struct ftrace_page *pg;
2562 struct dyn_ftrace *p;
2563 cycle_t start, stop;
2564 unsigned long update_cnt = 0;
2565 unsigned long ref = 0;
2566 bool test = false;
2567 int i;
2568
2569 /*
2570 * When adding a module, we need to check if tracers are
2571 * currently enabled and if they are set to trace all functions.
2572 * If they are, we need to enable the module functions as well
2573 * as update the reference counts for those function records.
2574 */
2575 if (mod) {
2576 struct ftrace_ops *ops;
2577
2578 for (ops = ftrace_ops_list;
2579 ops != &ftrace_list_end; ops = ops->next) {
2580 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2581 if (ops_traces_mod(ops))
2582 ref++;
2583 else
2584 test = true;
2585 }
2586 }
2587 }
2588
2589 start = ftrace_now(raw_smp_processor_id());
2590
2591 for (pg = new_pgs; pg; pg = pg->next) {
2592
2593 for (i = 0; i < pg->index; i++) {
2594 int cnt = ref;
2595
2596 /* If something went wrong, bail without enabling anything */
2597 if (unlikely(ftrace_disabled))
2598 return -1;
2599
2600 p = &pg->records[i];
2601 if (test)
2602 cnt += referenced_filters(p);
2603 p->flags = cnt;
2604
2605 /*
2606 * Do the initial record conversion from mcount jump
2607 * to the NOP instructions.
2608 */
2609 if (!ftrace_code_disable(mod, p))
2610 break;
2611
2612 update_cnt++;
2613
2614 /*
2615 * If the tracing is enabled, go ahead and enable the record.
2616 *
2617 * The reason not to enable the record immediately is the
2618 * inherent check of ftrace_make_nop/ftrace_make_call for
2619 * correct previous instructions. Doing the NOP conversion
2620 * first puts the module into the correct state, thus
2621 * passing the ftrace_make_call check.
2622 */
2623 if (ftrace_start_up && cnt) {
2624 int failed = __ftrace_replace_code(p, 1);
2625 if (failed)
2626 ftrace_bug(failed, p->ip);
2627 }
2628 }
2629 }
2630
2631 stop = ftrace_now(raw_smp_processor_id());
2632 ftrace_update_time = stop - start;
2633 ftrace_update_tot_cnt += update_cnt;
2634
2635 return 0;
2636 }
2637
2638 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2639 {
2640 int order;
2641 int cnt;
2642
2643 if (WARN_ON(!count))
2644 return -EINVAL;
2645
2646 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2647
2648 /*
2649 * We want to fill as much as possible, leaving no more
2650 * than a page's worth of entries empty.
2651 */
2652 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2653 order--;
2654
2655 again:
2656 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2657
2658 if (!pg->records) {
2659 /* if we can't allocate this size, try something smaller */
2660 if (!order)
2661 return -ENOMEM;
2662 order >>= 1;
2663 goto again;
2664 }
2665
2666 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2667 pg->size = cnt;
2668
2669 if (cnt > count)
2670 cnt = count;
2671
2672 return cnt;
2673 }
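/*
 * A worked example of the sizing above (a sketch that assumes PAGE_SIZE
 * is 4096 and ENTRY_SIZE is 32, giving ENTRIES_PER_PAGE == 128):
 *
 *	count = 200
 *	order = get_count_order(DIV_ROUND_UP(200, 128))
 *	      = get_count_order(2) = 1
 *	(PAGE_SIZE << 1) / ENTRY_SIZE = 256 entries, and since
 *	256 < 200 + 128 the while loop leaves order at 1: less than
 *	one page's worth of entries (56 of them) goes unused.
 */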
2674
2675 static struct ftrace_page *
2676 ftrace_allocate_pages(unsigned long num_to_init)
2677 {
2678 struct ftrace_page *start_pg;
2679 struct ftrace_page *pg;
2680 int order;
2681 int cnt;
2682
2683 if (!num_to_init)
2684 return NULL;
2685
2686 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2687 if (!pg)
2688 return NULL;
2689
2690 /*
2691 * Try to allocate as much as possible in one contiguous
2692 * location that fills in all of the space. We want to
2693 * waste as little space as possible.
2694 */
2695 for (;;) {
2696 cnt = ftrace_allocate_records(pg, num_to_init);
2697 if (cnt < 0)
2698 goto free_pages;
2699
2700 num_to_init -= cnt;
2701 if (!num_to_init)
2702 break;
2703
2704 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2705 if (!pg->next)
2706 goto free_pages;
2707
2708 pg = pg->next;
2709 }
2710
2711 return start_pg;
2712
2713 free_pages:
2714 pg = start_pg;
2715 while (pg) {
2716 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2717 free_pages((unsigned long)pg->records, order);
2718 start_pg = pg->next;
2719 kfree(pg);
2720 pg = start_pg;
2721 }
2722 pr_info("ftrace: FAILED to allocate memory for functions\n");
2723 return NULL;
2724 }
2725
2726 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2727
2728 struct ftrace_iterator {
2729 loff_t pos;
2730 loff_t func_pos;
2731 struct ftrace_page *pg;
2732 struct dyn_ftrace *func;
2733 struct ftrace_func_probe *probe;
2734 struct trace_parser parser;
2735 struct ftrace_hash *hash;
2736 struct ftrace_ops *ops;
2737 int hidx;
2738 int idx;
2739 unsigned flags;
2740 };
2741
2742 static void *
2743 t_hash_next(struct seq_file *m, loff_t *pos)
2744 {
2745 struct ftrace_iterator *iter = m->private;
2746 struct hlist_node *hnd = NULL;
2747 struct hlist_head *hhd;
2748
2749 (*pos)++;
2750 iter->pos = *pos;
2751
2752 if (iter->probe)
2753 hnd = &iter->probe->node;
2754 retry:
2755 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2756 return NULL;
2757
2758 hhd = &ftrace_func_hash[iter->hidx];
2759
2760 if (hlist_empty(hhd)) {
2761 iter->hidx++;
2762 hnd = NULL;
2763 goto retry;
2764 }
2765
2766 if (!hnd)
2767 hnd = hhd->first;
2768 else {
2769 hnd = hnd->next;
2770 if (!hnd) {
2771 iter->hidx++;
2772 goto retry;
2773 }
2774 }
2775
2776 if (WARN_ON_ONCE(!hnd))
2777 return NULL;
2778
2779 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2780
2781 return iter;
2782 }
2783
2784 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2785 {
2786 struct ftrace_iterator *iter = m->private;
2787 void *p = NULL;
2788 loff_t l;
2789
2790 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2791 return NULL;
2792
2793 if (iter->func_pos > *pos)
2794 return NULL;
2795
2796 iter->hidx = 0;
2797 for (l = 0; l <= (*pos - iter->func_pos); ) {
2798 p = t_hash_next(m, &l);
2799 if (!p)
2800 break;
2801 }
2802 if (!p)
2803 return NULL;
2804
2805 /* Only set this if we have an item */
2806 iter->flags |= FTRACE_ITER_HASH;
2807
2808 return iter;
2809 }
2810
2811 static int
2812 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2813 {
2814 struct ftrace_func_probe *rec;
2815
2816 rec = iter->probe;
2817 if (WARN_ON_ONCE(!rec))
2818 return -EIO;
2819
2820 if (rec->ops->print)
2821 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2822
2823 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2824
2825 if (rec->data)
2826 seq_printf(m, ":%p", rec->data);
2827 seq_putc(m, '\n');
2828
2829 return 0;
2830 }
2831
2832 static void *
2833 t_next(struct seq_file *m, void *v, loff_t *pos)
2834 {
2835 struct ftrace_iterator *iter = m->private;
2836 struct ftrace_ops *ops = iter->ops;
2837 struct dyn_ftrace *rec = NULL;
2838
2839 if (unlikely(ftrace_disabled))
2840 return NULL;
2841
2842 if (iter->flags & FTRACE_ITER_HASH)
2843 return t_hash_next(m, pos);
2844
2845 (*pos)++;
2846 iter->pos = iter->func_pos = *pos;
2847
2848 if (iter->flags & FTRACE_ITER_PRINTALL)
2849 return t_hash_start(m, pos);
2850
2851 retry:
2852 if (iter->idx >= iter->pg->index) {
2853 if (iter->pg->next) {
2854 iter->pg = iter->pg->next;
2855 iter->idx = 0;
2856 goto retry;
2857 }
2858 } else {
2859 rec = &iter->pg->records[iter->idx++];
2860 if (((iter->flags & FTRACE_ITER_FILTER) &&
2861 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
2862
2863 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2864 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
2865
2866 ((iter->flags & FTRACE_ITER_ENABLED) &&
2867 !(rec->flags & FTRACE_FL_ENABLED))) {
2868
2869 rec = NULL;
2870 goto retry;
2871 }
2872 }
2873
2874 if (!rec)
2875 return t_hash_start(m, pos);
2876
2877 iter->func = rec;
2878
2879 return iter;
2880 }
2881
2882 static void reset_iter_read(struct ftrace_iterator *iter)
2883 {
2884 iter->pos = 0;
2885 iter->func_pos = 0;
2886 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2887 }
2888
2889 static void *t_start(struct seq_file *m, loff_t *pos)
2890 {
2891 struct ftrace_iterator *iter = m->private;
2892 struct ftrace_ops *ops = iter->ops;
2893 void *p = NULL;
2894 loff_t l;
2895
2896 mutex_lock(&ftrace_lock);
2897
2898 if (unlikely(ftrace_disabled))
2899 return NULL;
2900
2901 /*
2902 * If an lseek was done, then reset and start from beginning.
2903 */
2904 if (*pos < iter->pos)
2905 reset_iter_read(iter);
2906
2907 /*
2908 * For set_ftrace_filter reading, if we have the filter
2909 * off, we can shortcut and just print out that all
2910 * functions are enabled.
2911 */
2912 if ((iter->flags & FTRACE_ITER_FILTER &&
2913 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
2914 (iter->flags & FTRACE_ITER_NOTRACE &&
2915 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
2916 if (*pos > 0)
2917 return t_hash_start(m, pos);
2918 iter->flags |= FTRACE_ITER_PRINTALL;
2919 /* reset in case of seek/pread */
2920 iter->flags &= ~FTRACE_ITER_HASH;
2921 return iter;
2922 }
2923
2924 if (iter->flags & FTRACE_ITER_HASH)
2925 return t_hash_start(m, pos);
2926
2927 /*
2928 * Unfortunately, we need to restart at ftrace_pages_start
2929 * every time we let go of the ftrace_lock mutex. This is because
2930 * those pointers can change without the lock.
2931 */
2932 iter->pg = ftrace_pages_start;
2933 iter->idx = 0;
2934 for (l = 0; l <= *pos; ) {
2935 p = t_next(m, p, &l);
2936 if (!p)
2937 break;
2938 }
2939
2940 if (!p)
2941 return t_hash_start(m, pos);
2942
2943 return iter;
2944 }
2945
2946 static void t_stop(struct seq_file *m, void *p)
2947 {
2948 mutex_unlock(&ftrace_lock);
2949 }
2950
2951 static int t_show(struct seq_file *m, void *v)
2952 {
2953 struct ftrace_iterator *iter = m->private;
2954 struct dyn_ftrace *rec;
2955
2956 if (iter->flags & FTRACE_ITER_HASH)
2957 return t_hash_show(m, iter);
2958
2959 if (iter->flags & FTRACE_ITER_PRINTALL) {
2960 if (iter->flags & FTRACE_ITER_NOTRACE)
2961 seq_printf(m, "#### no functions disabled ####\n");
2962 else
2963 seq_printf(m, "#### all functions enabled ####\n");
2964 return 0;
2965 }
2966
2967 rec = iter->func;
2968
2969 if (!rec)
2970 return 0;
2971
2972 seq_printf(m, "%ps", (void *)rec->ip);
2973 if (iter->flags & FTRACE_ITER_ENABLED) {
2974 seq_printf(m, " (%ld)%s",
2975 ftrace_rec_count(rec),
2976 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2977 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2978 struct ftrace_ops *ops;
2979
2980 ops = ftrace_find_tramp_ops_any(rec);
2981 if (ops)
2982 seq_printf(m, "\ttramp: %pS",
2983 (void *)ops->trampoline);
2984 else
2985 seq_printf(m, "\ttramp: ERROR!");
2986 }
2987 }
2988
2989 seq_printf(m, "\n");
2990
2991 return 0;
2992 }
2993
2994 static const struct seq_operations show_ftrace_seq_ops = {
2995 .start = t_start,
2996 .next = t_next,
2997 .stop = t_stop,
2998 .show = t_show,
2999 };
3000
3001 static int
3002 ftrace_avail_open(struct inode *inode, struct file *file)
3003 {
3004 struct ftrace_iterator *iter;
3005
3006 if (unlikely(ftrace_disabled))
3007 return -ENODEV;
3008
3009 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3010 if (iter) {
3011 iter->pg = ftrace_pages_start;
3012 iter->ops = &global_ops;
3013 }
3014
3015 return iter ? 0 : -ENOMEM;
3016 }
3017
3018 static int
3019 ftrace_enabled_open(struct inode *inode, struct file *file)
3020 {
3021 struct ftrace_iterator *iter;
3022
3023 if (unlikely(ftrace_disabled))
3024 return -ENODEV;
3025
3026 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3027 if (iter) {
3028 iter->pg = ftrace_pages_start;
3029 iter->flags = FTRACE_ITER_ENABLED;
3030 iter->ops = &global_ops;
3031 }
3032
3033 return iter ? 0 : -ENOMEM;
3034 }
3035
3036 /**
3037 * ftrace_regex_open - initialize function tracer filter files
3038 * @ops: The ftrace_ops that hold the hash filters
3039 * @flag: The type of filter to process
3040 * @inode: The inode, usually passed in to your open routine
3041 * @file: The file, usually passed in to your open routine
3042 *
3043 * ftrace_regex_open() initializes the filter files for the
3044 * @ops. Depending on @flag it may process the filter hash or
3045 * the notrace hash of @ops. With this called from the open
3046 * routine, you can use ftrace_filter_write() for the write
3047 * routine if @flag has FTRACE_ITER_FILTER set, or
3048 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3049 * tracing_lseek() should be used as the lseek routine, and
3050 * release must call ftrace_regex_release().
3051 */
3052 int
3053 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3054 struct inode *inode, struct file *file)
3055 {
3056 struct ftrace_iterator *iter;
3057 struct ftrace_hash *hash;
3058 int ret = 0;
3059
3060 ftrace_ops_init(ops);
3061
3062 if (unlikely(ftrace_disabled))
3063 return -ENODEV;
3064
3065 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3066 if (!iter)
3067 return -ENOMEM;
3068
3069 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3070 kfree(iter);
3071 return -ENOMEM;
3072 }
3073
3074 iter->ops = ops;
3075 iter->flags = flag;
3076
3077 mutex_lock(&ops->func_hash->regex_lock);
3078
3079 if (flag & FTRACE_ITER_NOTRACE)
3080 hash = ops->func_hash->notrace_hash;
3081 else
3082 hash = ops->func_hash->filter_hash;
3083
3084 if (file->f_mode & FMODE_WRITE) {
3085 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3086
3087 if (file->f_flags & O_TRUNC)
3088 iter->hash = alloc_ftrace_hash(size_bits);
3089 else
3090 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3091
3092 if (!iter->hash) {
3093 trace_parser_put(&iter->parser);
3094 kfree(iter);
3095 ret = -ENOMEM;
3096 goto out_unlock;
3097 }
3098 }
3099
3100 if (file->f_mode & FMODE_READ) {
3101 iter->pg = ftrace_pages_start;
3102
3103 ret = seq_open(file, &show_ftrace_seq_ops);
3104 if (!ret) {
3105 struct seq_file *m = file->private_data;
3106 m->private = iter;
3107 } else {
3108 /* Failed */
3109 free_ftrace_hash(iter->hash);
3110 trace_parser_put(&iter->parser);
3111 kfree(iter);
3112 }
3113 } else
3114 file->private_data = iter;
3115
3116 out_unlock:
3117 mutex_unlock(&ops->func_hash->regex_lock);
3118
3119 return ret;
3120 }
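/*
 * A sketch of the file_operations wiring described in the comment above
 * ftrace_regex_open() (this mirrors the real ftrace_filter_fops defined
 * later in this file):
 */
#if 0
static const struct file_operations example_filter_fops = {
	.open		= ftrace_filter_open,	/* ends up in ftrace_regex_open() */
	.read		= seq_read,
	.write		= ftrace_filter_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_regex_release,
};
#endif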
3121
3122 static int
3123 ftrace_filter_open(struct inode *inode, struct file *file)
3124 {
3125 struct ftrace_ops *ops = inode->i_private;
3126
3127 return ftrace_regex_open(ops,
3128 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3129 inode, file);
3130 }
3131
3132 static int
3133 ftrace_notrace_open(struct inode *inode, struct file *file)
3134 {
3135 struct ftrace_ops *ops = inode->i_private;
3136
3137 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3138 inode, file);
3139 }
3140
3141 static int ftrace_match(char *str, char *regex, int len, int type)
3142 {
3143 int matched = 0;
3144 int slen;
3145
3146 switch (type) {
3147 case MATCH_FULL:
3148 if (strcmp(str, regex) == 0)
3149 matched = 1;
3150 break;
3151 case MATCH_FRONT_ONLY:
3152 if (strncmp(str, regex, len) == 0)
3153 matched = 1;
3154 break;
3155 case MATCH_MIDDLE_ONLY:
3156 if (strstr(str, regex))
3157 matched = 1;
3158 break;
3159 case MATCH_END_ONLY:
3160 slen = strlen(str);
3161 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3162 matched = 1;
3163 break;
3164 }
3165
3166 return matched;
3167 }
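/*
 * Examples of the match types handled above; the type and the stripped
 * search string are produced by filter_parse_regex() from the user's glob:
 *
 *	glob	type			matches
 *	------	-----------------	--------------------------------
 *	foo	MATCH_FULL		only "foo"
 *	foo*	MATCH_FRONT_ONLY	"foo", "foobar", ...
 *	*foo*	MATCH_MIDDLE_ONLY	anything containing "foo"
 *	*foo	MATCH_END_ONLY		anything ending in "foo"
 */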
3168
3169 static int
3170 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3171 {
3172 struct ftrace_func_entry *entry;
3173 int ret = 0;
3174
3175 entry = ftrace_lookup_ip(hash, rec->ip);
3176 if (not) {
3177 /* Do nothing if it doesn't exist */
3178 if (!entry)
3179 return 0;
3180
3181 free_hash_entry(hash, entry);
3182 } else {
3183 /* Do nothing if it exists */
3184 if (entry)
3185 return 0;
3186
3187 ret = add_hash_entry(hash, rec->ip);
3188 }
3189 return ret;
3190 }
3191
3192 static int
3193 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3194 char *regex, int len, int type)
3195 {
3196 char str[KSYM_SYMBOL_LEN];
3197 char *modname;
3198
3199 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3200
3201 if (mod) {
3202 /* module lookup requires matching the module */
3203 if (!modname || strcmp(modname, mod))
3204 return 0;
3205
3206 /* blank search means to match all funcs in the mod */
3207 if (!len)
3208 return 1;
3209 }
3210
3211 return ftrace_match(str, regex, len, type);
3212 }
3213
3214 static int
3215 match_records(struct ftrace_hash *hash, char *buff,
3216 int len, char *mod, int not)
3217 {
3218 unsigned search_len = 0;
3219 struct ftrace_page *pg;
3220 struct dyn_ftrace *rec;
3221 int type = MATCH_FULL;
3222 char *search = buff;
3223 int found = 0;
3224 int ret;
3225
3226 if (len) {
3227 type = filter_parse_regex(buff, len, &search, &not);
3228 search_len = strlen(search);
3229 }
3230
3231 mutex_lock(&ftrace_lock);
3232
3233 if (unlikely(ftrace_disabled))
3234 goto out_unlock;
3235
3236 do_for_each_ftrace_rec(pg, rec) {
3237 if (ftrace_match_record(rec, mod, search, search_len, type)) {
3238 ret = enter_record(hash, rec, not);
3239 if (ret < 0) {
3240 found = ret;
3241 goto out_unlock;
3242 }
3243 found = 1;
3244 }
3245 } while_for_each_ftrace_rec();
3246 out_unlock:
3247 mutex_unlock(&ftrace_lock);
3248
3249 return found;
3250 }
3251
3252 static int
3253 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3254 {
3255 return match_records(hash, buff, len, NULL, 0);
3256 }
3257
3258 static int
3259 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3260 {
3261 int not = 0;
3262
3263 /* blank or '*' mean the same */
3264 if (strcmp(buff, "*") == 0)
3265 buff[0] = 0;
3266
3267 /* handle the case of 'don't filter this module' */
3268 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3269 buff[0] = 0;
3270 not = 1;
3271 }
3272
3273 return match_records(hash, buff, strlen(buff), mod, not);
3274 }
3275
3276 /*
3277 * We register the module command as a template to show others how
3278 * to register a command as well.
3279 */
3280
3281 static int
3282 ftrace_mod_callback(struct ftrace_hash *hash,
3283 char *func, char *cmd, char *param, int enable)
3284 {
3285 char *mod;
3286 int ret = -EINVAL;
3287
3288 /*
3289 * cmd == 'mod' because we only registered this func
3290 * for the 'mod' ftrace_func_command.
3291 * But if you register one func with multiple commands,
3292 * you can tell which command was used by the cmd
3293 * parameter.
3294 */
3295
3296 /* we must have a module name */
3297 if (!param)
3298 return ret;
3299
3300 mod = strsep(&param, ":");
3301 if (!strlen(mod))
3302 return ret;
3303
3304 ret = ftrace_match_module_records(hash, func, mod);
3305 if (!ret)
3306 ret = -EINVAL;
3307 if (ret < 0)
3308 return ret;
3309
3310 return 0;
3311 }
3312
3313 static struct ftrace_func_command ftrace_mod_cmd = {
3314 .name = "mod",
3315 .func = ftrace_mod_callback,
3316 };
3317
3318 static int __init ftrace_mod_cmd_init(void)
3319 {
3320 return register_ftrace_command(&ftrace_mod_cmd);
3321 }
3322 core_initcall(ftrace_mod_cmd_init);
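/*
 * A sketch of registering another command, modeled on ftrace_mod_cmd
 * above ("example" and the callback are hypothetical names):
 */
#if 0
static int example_callback(struct ftrace_hash *hash,
			    char *func, char *cmd, char *param, int enable)
{
	/* runs when "func:example:param" is written to set_ftrace_filter */
	return 0;
}

static struct ftrace_func_command example_cmd = {
	.name = "example",
	.func = example_callback,
};

static int __init example_cmd_init(void)
{
	return register_ftrace_command(&example_cmd);
}
core_initcall(example_cmd_init);
#endif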
3323
3324 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3325 struct ftrace_ops *op, struct pt_regs *pt_regs)
3326 {
3327 struct ftrace_func_probe *entry;
3328 struct hlist_head *hhd;
3329 unsigned long key;
3330
3331 key = hash_long(ip, FTRACE_HASH_BITS);
3332
3333 hhd = &ftrace_func_hash[key];
3334
3335 if (hlist_empty(hhd))
3336 return;
3337
3338 /*
3339 * Disable preemption for these calls to prevent an RCU grace
3340 * period. This syncs the hash iteration and freeing of items
3341 * on the hash. rcu_read_lock is too dangerous here.
3342 */
3343 preempt_disable_notrace();
3344 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3345 if (entry->ip == ip)
3346 entry->ops->func(ip, parent_ip, &entry->data);
3347 }
3348 preempt_enable_notrace();
3349 }
3350
3351 static struct ftrace_ops trace_probe_ops __read_mostly =
3352 {
3353 .func = function_trace_probe_call,
3354 .flags = FTRACE_OPS_FL_INITIALIZED,
3355 INIT_OPS_HASH(trace_probe_ops)
3356 };
3357
3358 static int ftrace_probe_registered;
3359
3360 static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
3361 {
3362 int ret;
3363 int i;
3364
3365 if (ftrace_probe_registered) {
3366 /* still need to update the function call sites */
3367 if (ftrace_enabled)
3368 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3369 old_hash);
3370 return;
3371 }
3372
3373 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3374 struct hlist_head *hhd = &ftrace_func_hash[i];
3375 if (hhd->first)
3376 break;
3377 }
3378 /* Nothing registered? */
3379 if (i == FTRACE_FUNC_HASHSIZE)
3380 return;
3381
3382 ret = ftrace_startup(&trace_probe_ops, 0);
3383
3384 ftrace_probe_registered = 1;
3385 }
3386
3387 static void __disable_ftrace_function_probe(void)
3388 {
3389 int i;
3390
3391 if (!ftrace_probe_registered)
3392 return;
3393
3394 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3395 struct hlist_head *hhd = &ftrace_func_hash[i];
3396 if (hhd->first)
3397 return;
3398 }
3399
3400 /* no more funcs left */
3401 ftrace_shutdown(&trace_probe_ops, 0);
3402
3403 ftrace_probe_registered = 0;
3404 }
3405
3406
3407 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3408 {
3409 if (entry->ops->free)
3410 entry->ops->free(entry->ops, entry->ip, &entry->data);
3411 kfree(entry);
3412 }
3413
3414 int
3415 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3416 void *data)
3417 {
3418 struct ftrace_func_probe *entry;
3419 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3420 struct ftrace_hash *old_hash = *orig_hash;
3421 struct ftrace_hash *hash;
3422 struct ftrace_page *pg;
3423 struct dyn_ftrace *rec;
3424 int type, len, not;
3425 unsigned long key;
3426 int count = 0;
3427 char *search;
3428 int ret;
3429
3430 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3431 len = strlen(search);
3432
3433 /* we do not support '!' for function probes */
3434 if (WARN_ON(not))
3435 return -EINVAL;
3436
3437 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3438
3439 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3440 if (!hash) {
3441 count = -ENOMEM;
3442 goto out;
3443 }
3444
3445 if (unlikely(ftrace_disabled)) {
3446 count = -ENODEV;
3447 goto out;
3448 }
3449
3450 mutex_lock(&ftrace_lock);
3451
3452 do_for_each_ftrace_rec(pg, rec) {
3453
3454 if (!ftrace_match_record(rec, NULL, search, len, type))
3455 continue;
3456
3457 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3458 if (!entry) {
3459 /* If we did not process any, then return error */
3460 if (!count)
3461 count = -ENOMEM;
3462 goto out_unlock;
3463 }
3464
3465 count++;
3466
3467 entry->data = data;
3468
3469 /*
3470 * The caller might want to do something special
3471 * for each function we find. We call the callback
3472 * to give the caller an opportunity to do so.
3473 */
3474 if (ops->init) {
3475 if (ops->init(ops, rec->ip, &entry->data) < 0) {
3476 /* caller does not like this func */
3477 kfree(entry);
3478 continue;
3479 }
3480 }
3481
3482 ret = enter_record(hash, rec, 0);
3483 if (ret < 0) {
3484 kfree(entry);
3485 count = ret;
3486 goto out_unlock;
3487 }
3488
3489 entry->ops = ops;
3490 entry->ip = rec->ip;
3491
3492 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3493 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3494
3495 } while_for_each_ftrace_rec();
3496
3497 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3498
3499 __enable_ftrace_function_probe(old_hash);
3500
3501 if (!ret)
3502 free_ftrace_hash_rcu(old_hash);
3503 else
3504 count = ret;
3505
3506 out_unlock:
3507 mutex_unlock(&ftrace_lock);
3508 out:
3509 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3510 free_ftrace_hash(hash);
3511
3512 return count;
3513 }
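/*
 * A minimal sketch of using the probe API above (the ops and callback are
 * hypothetical; the callback signature is the one invoked from
 * function_trace_probe_call()):
 */
#if 0
static void example_probe_func(unsigned long ip, unsigned long parent_ip,
			       void **data)
{
	/* called every time one of the matched functions is hit */
}

static struct ftrace_probe_ops example_probe_ops = {
	.func = example_probe_func,
};

/* attach the probe to every function whose name starts with "vfs_":
 *	register_ftrace_function_probe("vfs_*", &example_probe_ops, NULL);
 */
#endif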
3514
3515 enum {
3516 PROBE_TEST_FUNC = 1,
3517 PROBE_TEST_DATA = 2
3518 };
3519
3520 static void
3521 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3522 void *data, int flags)
3523 {
3524 struct ftrace_func_entry *rec_entry;
3525 struct ftrace_func_probe *entry;
3526 struct ftrace_func_probe *p;
3527 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3528 struct ftrace_hash *old_hash = *orig_hash;
3529 struct list_head free_list;
3530 struct ftrace_hash *hash;
3531 struct hlist_node *tmp;
3532 char str[KSYM_SYMBOL_LEN];
3533 int type = MATCH_FULL;
3534 int i, len = 0;
3535 char *search;
3536 int ret;
3537
3538 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3539 glob = NULL;
3540 else if (glob) {
3541 int not;
3542
3543 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3544 len = strlen(search);
3545
3546 /* we do not support '!' for function probes */
3547 if (WARN_ON(not))
3548 return;
3549 }
3550
3551 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3552
3553 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3554 if (!hash)
3555 /* Hmm, should report this somehow */
3556 goto out_unlock;
3557
3558 INIT_LIST_HEAD(&free_list);
3559
3560 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3561 struct hlist_head *hhd = &ftrace_func_hash[i];
3562
3563 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3564
3565 /* break up if statements for readability */
3566 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3567 continue;
3568
3569 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3570 continue;
3571
3572 /* do this last, since it is the most expensive */
3573 if (glob) {
3574 kallsyms_lookup(entry->ip, NULL, NULL,
3575 NULL, str);
3576 if (!ftrace_match(str, glob, len, type))
3577 continue;
3578 }
3579
3580 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3581 /* It is possible more than one entry had this ip */
3582 if (rec_entry)
3583 free_hash_entry(hash, rec_entry);
3584
3585 hlist_del_rcu(&entry->node);
3586 list_add(&entry->free_list, &free_list);
3587 }
3588 }
3589 mutex_lock(&ftrace_lock);
3590 __disable_ftrace_function_probe();
3591 /*
3592 * Remove after the disable is called. Otherwise, if the last
3593 * probe is removed, a null hash means *all enabled*.
3594 */
3595 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3596 synchronize_sched();
3597 if (!ret)
3598 free_ftrace_hash_rcu(old_hash);
3599
3600 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3601 list_del(&entry->free_list);
3602 ftrace_free_entry(entry);
3603 }
3604 mutex_unlock(&ftrace_lock);
3605
3606 out_unlock:
3607 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3608 free_ftrace_hash(hash);
3609 }
3610
3611 void
3612 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3613 void *data)
3614 {
3615 __unregister_ftrace_function_probe(glob, ops, data,
3616 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3617 }
3618
3619 void
3620 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3621 {
3622 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3623 }
3624
3625 void unregister_ftrace_function_probe_all(char *glob)
3626 {
3627 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3628 }
3629
3630 static LIST_HEAD(ftrace_commands);
3631 static DEFINE_MUTEX(ftrace_cmd_mutex);
3632
3633 /*
3634 * Currently we only register ftrace commands from __init, so mark this
3635 * __init too.
3636 */
3637 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3638 {
3639 struct ftrace_func_command *p;
3640 int ret = 0;
3641
3642 mutex_lock(&ftrace_cmd_mutex);
3643 list_for_each_entry(p, &ftrace_commands, list) {
3644 if (strcmp(cmd->name, p->name) == 0) {
3645 ret = -EBUSY;
3646 goto out_unlock;
3647 }
3648 }
3649 list_add(&cmd->list, &ftrace_commands);
3650 out_unlock:
3651 mutex_unlock(&ftrace_cmd_mutex);
3652
3653 return ret;
3654 }
3655
3656 /*
3657 * Currently we only unregister ftrace commands from __init, so mark
3658 * this __init too.
3659 */
3660 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3661 {
3662 struct ftrace_func_command *p, *n;
3663 int ret = -ENODEV;
3664
3665 mutex_lock(&ftrace_cmd_mutex);
3666 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3667 if (strcmp(cmd->name, p->name) == 0) {
3668 ret = 0;
3669 list_del_init(&p->list);
3670 goto out_unlock;
3671 }
3672 }
3673 out_unlock:
3674 mutex_unlock(&ftrace_cmd_mutex);
3675
3676 return ret;
3677 }
3678
3679 static int ftrace_process_regex(struct ftrace_hash *hash,
3680 char *buff, int len, int enable)
3681 {
3682 char *func, *command, *next = buff;
3683 struct ftrace_func_command *p;
3684 int ret = -EINVAL;
3685
3686 func = strsep(&next, ":");
3687
3688 if (!next) {
3689 ret = ftrace_match_records(hash, func, len);
3690 if (!ret)
3691 ret = -EINVAL;
3692 if (ret < 0)
3693 return ret;
3694 return 0;
3695 }
3696
3697 /* command found */
3698
3699 command = strsep(&next, ":");
3700
3701 mutex_lock(&ftrace_cmd_mutex);
3702 list_for_each_entry(p, &ftrace_commands, list) {
3703 if (strcmp(p->name, command) == 0) {
3704 ret = p->func(hash, func, command, next, enable);
3705 goto out_unlock;
3706 }
3707 }
3708 out_unlock:
3709 mutex_unlock(&ftrace_cmd_mutex);
3710
3711 return ret;
3712 }
3713
3714 static ssize_t
3715 ftrace_regex_write(struct file *file, const char __user *ubuf,
3716 size_t cnt, loff_t *ppos, int enable)
3717 {
3718 struct ftrace_iterator *iter;
3719 struct trace_parser *parser;
3720 ssize_t ret, read;
3721
3722 if (!cnt)
3723 return 0;
3724
3725 if (file->f_mode & FMODE_READ) {
3726 struct seq_file *m = file->private_data;
3727 iter = m->private;
3728 } else
3729 iter = file->private_data;
3730
3731 if (unlikely(ftrace_disabled))
3732 return -ENODEV;
3733
3734 /* iter->hash is a local copy, so we don't need regex_lock */
3735
3736 parser = &iter->parser;
3737 read = trace_get_user(parser, ubuf, cnt, ppos);
3738
3739 if (read >= 0 && trace_parser_loaded(parser) &&
3740 !trace_parser_cont(parser)) {
3741 ret = ftrace_process_regex(iter->hash, parser->buffer,
3742 parser->idx, enable);
3743 trace_parser_clear(parser);
3744 if (ret < 0)
3745 goto out;
3746 }
3747
3748 ret = read;
3749 out:
3750 return ret;
3751 }
3752
3753 ssize_t
3754 ftrace_filter_write(struct file *file, const char __user *ubuf,
3755 size_t cnt, loff_t *ppos)
3756 {
3757 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3758 }
3759
3760 ssize_t
3761 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3762 size_t cnt, loff_t *ppos)
3763 {
3764 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3765 }
3766
3767 static int
3768 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3769 {
3770 struct ftrace_func_entry *entry;
3771
3772 if (!ftrace_location(ip))
3773 return -EINVAL;
3774
3775 if (remove) {
3776 entry = ftrace_lookup_ip(hash, ip);
3777 if (!entry)
3778 return -ENOENT;
3779 free_hash_entry(hash, entry);
3780 return 0;
3781 }
3782
3783 return add_hash_entry(hash, ip);
3784 }
3785
3786 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3787 struct ftrace_hash *old_hash)
3788 {
3789 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3790 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3791 }
3792
3793 static int
3794 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3795 unsigned long ip, int remove, int reset, int enable)
3796 {
3797 struct ftrace_hash **orig_hash;
3798 struct ftrace_hash *old_hash;
3799 struct ftrace_hash *hash;
3800 int ret;
3801
3802 if (unlikely(ftrace_disabled))
3803 return -ENODEV;
3804
3805 mutex_lock(&ops->func_hash->regex_lock);
3806
3807 if (enable)
3808 orig_hash = &ops->func_hash->filter_hash;
3809 else
3810 orig_hash = &ops->func_hash->notrace_hash;
3811
3812 if (reset)
3813 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
3814 else
3815 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3816
3817 if (!hash) {
3818 ret = -ENOMEM;
3819 goto out_regex_unlock;
3820 }
3821
3822 if (buf && !ftrace_match_records(hash, buf, len)) {
3823 ret = -EINVAL;
3824 goto out_regex_unlock;
3825 }
3826 if (ip) {
3827 ret = ftrace_match_addr(hash, ip, remove);
3828 if (ret < 0)
3829 goto out_regex_unlock;
3830 }
3831
3832 mutex_lock(&ftrace_lock);
3833 old_hash = *orig_hash;
3834 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3835 if (!ret) {
3836 ftrace_ops_update_code(ops, old_hash);
3837 free_ftrace_hash_rcu(old_hash);
3838 }
3839 mutex_unlock(&ftrace_lock);
3840
3841 out_regex_unlock:
3842 mutex_unlock(&ops->func_hash->regex_lock);
3843
3844 free_ftrace_hash(hash);
3845 return ret;
3846 }
3847
3848 static int
3849 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3850 int reset, int enable)
3851 {
3852 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3853 }
3854
3855 /**
3856 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3857 * @ops: the ops to set the filter with
3858 * @ip: the address to add to or remove from the filter.
3859 * @remove: non zero to remove the ip from the filter
3860 * @reset: non zero to reset all filters before applying this filter.
3861 *
3862 * Filters denote which functions should be enabled when tracing is enabled.
3863 * If @ip is NULL, it fails to update the filter.
3864 */
3865 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3866 int remove, int reset)
3867 {
3868 ftrace_ops_init(ops);
3869 return ftrace_set_addr(ops, ip, remove, reset, 1);
3870 }
3871 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
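/*
 * A minimal sketch of filtering a single function by address (my_ops is a
 * hypothetical, already initialized ftrace_ops; kallsyms_lookup_name() is
 * used here only to obtain an address for illustration):
 */
#if 0
static int example_filter_one(struct ftrace_ops *my_ops)
{
	unsigned long ip = kallsyms_lookup_name("schedule");

	if (!ip)
		return -ENOENT;
	/* reset any existing filter, then trace just this address */
	return ftrace_set_filter_ip(my_ops, ip, 0, 1);
}
#endif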
3872
3873 static int
3874 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3875 int reset, int enable)
3876 {
3877 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3878 }
3879
3880 /**
3881 * ftrace_set_filter - set a function to filter on in ftrace
3882 * @ops: the ops to set the filter with
3883 * @buf: the string that holds the function filter text.
3884 * @len: the length of the string.
3885 * @reset: non zero to reset all filters before applying this filter.
3886 *
3887 * Filters denote which functions should be enabled when tracing is enabled.
3888 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3889 */
3890 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3891 int len, int reset)
3892 {
3893 ftrace_ops_init(ops);
3894 return ftrace_set_regex(ops, buf, len, reset, 1);
3895 }
3896 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3897
3898 /**
3899 * ftrace_set_notrace - set a function to not trace in ftrace
3900 * @ops: the ops to set the notrace filter with
3901 * @buf: the string that holds the function notrace text.
3902 * @len: the length of the string.
3903 * @reset: non zero to reset all filters before applying this filter.
3904 *
3905 * Notrace Filters denote which functions should not be enabled when tracing
3906 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3907 * for tracing.
3908 */
3909 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3910 int len, int reset)
3911 {
3912 ftrace_ops_init(ops);
3913 return ftrace_set_regex(ops, buf, len, reset, 0);
3914 }
3915 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3916 /**
3917 * ftrace_set_global_filter - set a function to filter on with global tracers
3918 * @buf: the string that holds the function filter text.
3919 * @len: the length of the string.
3920 * @reset: non zero to reset all filters before applying this filter.
3921 *
3922 * Filters denote which functions should be enabled when tracing is enabled.
3923 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3924 */
3925 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3926 {
3927 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3928 }
3929 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3930
3931 /**
3932 * ftrace_set_global_notrace - set a function to not trace with global tracers
3933 * @buf: the string that holds the function notrace text.
3934 * @len: the length of the string.
3935 * @reset: non zero to reset all filters before applying this filter.
3936 *
3937 * Notrace Filters denote which functions should not be enabled when tracing
3938 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3939 * for tracing.
3940 */
3941 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3942 {
3943 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3944 }
3945 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3946
3947 /*
3948 * command line interface to allow users to set filters on boot up.
3949 */
3950 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3951 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3952 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3953
3954 /* Used by function selftest to not test if filter is set */
3955 bool ftrace_filter_param __initdata;
3956
3957 static int __init set_ftrace_notrace(char *str)
3958 {
3959 ftrace_filter_param = true;
3960 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3961 return 1;
3962 }
3963 __setup("ftrace_notrace=", set_ftrace_notrace);
3964
3965 static int __init set_ftrace_filter(char *str)
3966 {
3967 ftrace_filter_param = true;
3968 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3969 return 1;
3970 }
3971 __setup("ftrace_filter=", set_ftrace_filter);
3972
3973 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3974 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3975 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3976 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
3977
3978 static int __init set_graph_function(char *str)
3979 {
3980 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3981 return 1;
3982 }
3983 __setup("ftrace_graph_filter=", set_graph_function);
3984
3985 static int __init set_graph_notrace_function(char *str)
3986 {
3987 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
3988 return 1;
3989 }
3990 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
3991
3992 static void __init set_ftrace_early_graph(char *buf, int enable)
3993 {
3994 int ret;
3995 char *func;
3996 unsigned long *table = ftrace_graph_funcs;
3997 int *count = &ftrace_graph_count;
3998
3999 if (!enable) {
4000 table = ftrace_graph_notrace_funcs;
4001 count = &ftrace_graph_notrace_count;
4002 }
4003
4004 while (buf) {
4005 func = strsep(&buf, ",");
4006 /* we allow only one expression at a time */
4007 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4008 if (ret)
4009 printk(KERN_DEBUG "ftrace: function %s not "
4010 "traceable\n", func);
4011 }
4012 }
4013 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4014
4015 void __init
4016 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4017 {
4018 char *func;
4019
4020 ftrace_ops_init(ops);
4021
4022 while (buf) {
4023 func = strsep(&buf, ",");
4024 ftrace_set_regex(ops, func, strlen(func), 0, enable);
4025 }
4026 }
4027
4028 static void __init set_ftrace_early_filters(void)
4029 {
4030 if (ftrace_filter_buf[0])
4031 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4032 if (ftrace_notrace_buf[0])
4033 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4034 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4035 if (ftrace_graph_buf[0])
4036 set_ftrace_early_graph(ftrace_graph_buf, 1);
4037 if (ftrace_graph_notrace_buf[0])
4038 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4039 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4040 }
4041
4042 int ftrace_regex_release(struct inode *inode, struct file *file)
4043 {
4044 struct seq_file *m = (struct seq_file *)file->private_data;
4045 struct ftrace_iterator *iter;
4046 struct ftrace_hash **orig_hash;
4047 struct ftrace_hash *old_hash;
4048 struct trace_parser *parser;
4049 int filter_hash;
4050 int ret;
4051
4052 if (file->f_mode & FMODE_READ) {
4053 iter = m->private;
4054 seq_release(inode, file);
4055 } else
4056 iter = file->private_data;
4057
4058 parser = &iter->parser;
4059 if (trace_parser_loaded(parser)) {
4060 parser->buffer[parser->idx] = 0;
4061 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4062 }
4063
4064 trace_parser_put(parser);
4065
4066 mutex_lock(&iter->ops->func_hash->regex_lock);
4067
4068 if (file->f_mode & FMODE_WRITE) {
4069 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4070
4071 if (filter_hash)
4072 orig_hash = &iter->ops->func_hash->filter_hash;
4073 else
4074 orig_hash = &iter->ops->func_hash->notrace_hash;
4075
4076 mutex_lock(&ftrace_lock);
4077 old_hash = *orig_hash;
4078 ret = ftrace_hash_move(iter->ops, filter_hash,
4079 orig_hash, iter->hash);
4080 if (!ret) {
4081 ftrace_ops_update_code(iter->ops, old_hash);
4082 free_ftrace_hash_rcu(old_hash);
4083 }
4084 mutex_unlock(&ftrace_lock);
4085 }
4086
4087 mutex_unlock(&iter->ops->func_hash->regex_lock);
4088 free_ftrace_hash(iter->hash);
4089 kfree(iter);
4090
4091 return 0;
4092 }
4093
4094 static const struct file_operations ftrace_avail_fops = {
4095 .open = ftrace_avail_open,
4096 .read = seq_read,
4097 .llseek = seq_lseek,
4098 .release = seq_release_private,
4099 };
4100
4101 static const struct file_operations ftrace_enabled_fops = {
4102 .open = ftrace_enabled_open,
4103 .read = seq_read,
4104 .llseek = seq_lseek,
4105 .release = seq_release_private,
4106 };
4107
4108 static const struct file_operations ftrace_filter_fops = {
4109 .open = ftrace_filter_open,
4110 .read = seq_read,
4111 .write = ftrace_filter_write,
4112 .llseek = tracing_lseek,
4113 .release = ftrace_regex_release,
4114 };
4115
4116 static const struct file_operations ftrace_notrace_fops = {
4117 .open = ftrace_notrace_open,
4118 .read = seq_read,
4119 .write = ftrace_notrace_write,
4120 .llseek = tracing_lseek,
4121 .release = ftrace_regex_release,
4122 };
4123
4124 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4125
4126 static DEFINE_MUTEX(graph_lock);
4127
4128 int ftrace_graph_count;
4129 int ftrace_graph_notrace_count;
4130 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4131 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4132
4133 struct ftrace_graph_data {
4134 unsigned long *table;
4135 size_t size;
4136 int *count;
4137 const struct seq_operations *seq_ops;
4138 };
4139
4140 static void *
4141 __g_next(struct seq_file *m, loff_t *pos)
4142 {
4143 struct ftrace_graph_data *fgd = m->private;
4144
4145 if (*pos >= *fgd->count)
4146 return NULL;
4147 return &fgd->table[*pos];
4148 }
4149
4150 static void *
4151 g_next(struct seq_file *m, void *v, loff_t *pos)
4152 {
4153 (*pos)++;
4154 return __g_next(m, pos);
4155 }
4156
4157 static void *g_start(struct seq_file *m, loff_t *pos)
4158 {
4159 struct ftrace_graph_data *fgd = m->private;
4160
4161 mutex_lock(&graph_lock);
4162
4163 /* Nothing set; tell g_show to print that all functions are enabled */
4164 if (!*fgd->count && !*pos)
4165 return (void *)1;
4166
4167 return __g_next(m, pos);
4168 }
4169
4170 static void g_stop(struct seq_file *m, void *p)
4171 {
4172 mutex_unlock(&graph_lock);
4173 }
4174
4175 static int g_show(struct seq_file *m, void *v)
4176 {
4177 unsigned long *ptr = v;
4178
4179 if (!ptr)
4180 return 0;
4181
4182 if (ptr == (unsigned long *)1) {
4183 struct ftrace_graph_data *fgd = m->private;
4184
4185 if (fgd->table == ftrace_graph_funcs)
4186 seq_printf(m, "#### all functions enabled ####\n");
4187 else
4188 seq_printf(m, "#### no functions disabled ####\n");
4189 return 0;
4190 }
4191
4192 seq_printf(m, "%ps\n", (void *)*ptr);
4193
4194 return 0;
4195 }
4196
4197 static const struct seq_operations ftrace_graph_seq_ops = {
4198 .start = g_start,
4199 .next = g_next,
4200 .stop = g_stop,
4201 .show = g_show,
4202 };
4203
4204 static int
4205 __ftrace_graph_open(struct inode *inode, struct file *file,
4206 struct ftrace_graph_data *fgd)
4207 {
4208 int ret = 0;
4209
4210 mutex_lock(&graph_lock);
4211 if ((file->f_mode & FMODE_WRITE) &&
4212 (file->f_flags & O_TRUNC)) {
4213 *fgd->count = 0;
4214 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4215 }
4216 mutex_unlock(&graph_lock);
4217
4218 if (file->f_mode & FMODE_READ) {
4219 ret = seq_open(file, fgd->seq_ops);
4220 if (!ret) {
4221 struct seq_file *m = file->private_data;
4222 m->private = fgd;
4223 }
4224 } else
4225 file->private_data = fgd;
4226
4227 return ret;
4228 }
4229
4230 static int
4231 ftrace_graph_open(struct inode *inode, struct file *file)
4232 {
4233 struct ftrace_graph_data *fgd;
4234
4235 if (unlikely(ftrace_disabled))
4236 return -ENODEV;
4237
4238 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4239 if (fgd == NULL)
4240 return -ENOMEM;
4241
4242 fgd->table = ftrace_graph_funcs;
4243 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4244 fgd->count = &ftrace_graph_count;
4245 fgd->seq_ops = &ftrace_graph_seq_ops;
4246
4247 return __ftrace_graph_open(inode, file, fgd);
4248 }
4249
4250 static int
4251 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4252 {
4253 struct ftrace_graph_data *fgd;
4254
4255 if (unlikely(ftrace_disabled))
4256 return -ENODEV;
4257
4258 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4259 if (fgd == NULL)
4260 return -ENOMEM;
4261
4262 fgd->table = ftrace_graph_notrace_funcs;
4263 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4264 fgd->count = &ftrace_graph_notrace_count;
4265 fgd->seq_ops = &ftrace_graph_seq_ops;
4266
4267 return __ftrace_graph_open(inode, file, fgd);
4268 }
4269
4270 static int
4271 ftrace_graph_release(struct inode *inode, struct file *file)
4272 {
4273 if (file->f_mode & FMODE_READ) {
4274 struct seq_file *m = file->private_data;
4275
4276 kfree(m->private);
4277 seq_release(inode, file);
4278 } else {
4279 kfree(file->private_data);
4280 }
4281
4282 return 0;
4283 }
4284
4285 static int
4286 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4287 {
4288 struct dyn_ftrace *rec;
4289 struct ftrace_page *pg;
4290 int search_len;
4291 int fail = 1;
4292 int type, not;
4293 char *search;
4294 bool exists;
4295 int i;
4296
4297 /* decode regex */
4298 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4299 if (!not && *idx >= size)
4300 return -EBUSY;
4301
4302 search_len = strlen(search);
4303
4304 mutex_lock(&ftrace_lock);
4305
4306 if (unlikely(ftrace_disabled)) {
4307 mutex_unlock(&ftrace_lock);
4308 return -ENODEV;
4309 }
4310
4311 do_for_each_ftrace_rec(pg, rec) {
4312
4313 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4314 /* if it is in the array */
4315 exists = false;
4316 for (i = 0; i < *idx; i++) {
4317 if (array[i] == rec->ip) {
4318 exists = true;
4319 break;
4320 }
4321 }
4322
4323 if (!not) {
4324 fail = 0;
4325 if (!exists) {
4326 array[(*idx)++] = rec->ip;
4327 if (*idx >= size)
4328 goto out;
4329 }
4330 } else {
4331 if (exists) {
4332 array[i] = array[--(*idx)];
4333 array[*idx] = 0;
4334 fail = 0;
4335 }
4336 }
4337 }
4338 } while_for_each_ftrace_rec();
4339 out:
4340 mutex_unlock(&ftrace_lock);
4341
4342 if (fail)
4343 return -EINVAL;
4344
4345 return 0;
4346 }
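
/*
 * ftrace_set_func() above services writes to set_graph_function and
 * set_graph_notrace. A plausible session from user space (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	echo do_page_fault > /sys/kernel/debug/tracing/set_graph_function
 *	echo '!do_page_fault' >> /sys/kernel/debug/tracing/set_graph_function
 *
 * The '!' prefix sets "not" in the regex decode above, taking the
 * removal path instead of adding to the array. Note the append
 * redirection for the second write: truncating the file clears the
 * whole list first (see __ftrace_graph_open() above).
 */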
4347
4348 static ssize_t
4349 ftrace_graph_write(struct file *file, const char __user *ubuf,
4350 size_t cnt, loff_t *ppos)
4351 {
4352 struct trace_parser parser;
4353 ssize_t read, ret = 0;
4354 struct ftrace_graph_data *fgd = file->private_data;
4355
4356 if (!cnt)
4357 return 0;
4358
4359 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4360 return -ENOMEM;
4361
4362 read = trace_get_user(&parser, ubuf, cnt, ppos);
4363
4364 	if (read >= 0 && trace_parser_loaded(&parser)) {
4365 parser.buffer[parser.idx] = 0;
4366
4367 mutex_lock(&graph_lock);
4368
4369 /* we allow only one expression at a time */
4370 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4371 parser.buffer);
4372
4373 mutex_unlock(&graph_lock);
4374 }
4375
4376 if (!ret)
4377 ret = read;
4378
4379 trace_parser_put(&parser);
4380
4381 return ret;
4382 }
4383
4384 static const struct file_operations ftrace_graph_fops = {
4385 .open = ftrace_graph_open,
4386 .read = seq_read,
4387 .write = ftrace_graph_write,
4388 .llseek = tracing_lseek,
4389 .release = ftrace_graph_release,
4390 };
4391
4392 static const struct file_operations ftrace_graph_notrace_fops = {
4393 .open = ftrace_graph_notrace_open,
4394 .read = seq_read,
4395 .write = ftrace_graph_write,
4396 .llseek = tracing_lseek,
4397 .release = ftrace_graph_release,
4398 };
4399 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4400
4401 void ftrace_create_filter_files(struct ftrace_ops *ops,
4402 struct dentry *parent)
4403 {
4404
4405 trace_create_file("set_ftrace_filter", 0644, parent,
4406 ops, &ftrace_filter_fops);
4407
4408 trace_create_file("set_ftrace_notrace", 0644, parent,
4409 ops, &ftrace_notrace_fops);
4410 }
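
/*
 * For the global ops these are the familiar tracing files, driven
 * from user space as, for example:
 *
 *	echo 'schedule*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '*rcu*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 */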
4411
4412 /*
4413 * The name "destroy_filter_files" is really a misnomer. Although
4414 * it may actually delete the files in the future, for now it is
4415 * really intended to make sure the ops passed in are disabled
4416 * and that, when this function returns, the caller is free to
4417 * free the ops.
4418 *
4419 * The "destroy" name is only to match the "create" name that this
4420 * should be paired with.
4421 */
4422 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4423 {
4424 mutex_lock(&ftrace_lock);
4425 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4426 ftrace_shutdown(ops, 0);
4427 ops->flags |= FTRACE_OPS_FL_DELETED;
4428 mutex_unlock(&ftrace_lock);
4429 }
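
/*
 * A typical lifetime, per the comment above (my_ops and my_dir are
 * hypothetical):
 *
 *	ftrace_create_filter_files(&my_ops, my_dir);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_destroy_filter_files(&my_ops);
 *
 * after which the caller may safely free my_ops.
 */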
4430
4431 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4432 {
4433
4434 trace_create_file("available_filter_functions", 0444,
4435 d_tracer, NULL, &ftrace_avail_fops);
4436
4437 trace_create_file("enabled_functions", 0444,
4438 d_tracer, NULL, &ftrace_enabled_fops);
4439
4440 ftrace_create_filter_files(&global_ops, d_tracer);
4441
4442 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4443 trace_create_file("set_graph_function", 0444, d_tracer,
4444 NULL,
4445 &ftrace_graph_fops);
4446 trace_create_file("set_graph_notrace", 0444, d_tracer,
4447 NULL,
4448 &ftrace_graph_notrace_fops);
4449 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4450
4451 return 0;
4452 }
4453
4454 static int ftrace_cmp_ips(const void *a, const void *b)
4455 {
4456 const unsigned long *ipa = a;
4457 const unsigned long *ipb = b;
4458
4459 if (*ipa > *ipb)
4460 return 1;
4461 if (*ipa < *ipb)
4462 return -1;
4463 return 0;
4464 }
4465
4466 static void ftrace_swap_ips(void *a, void *b, int size)
4467 {
4468 unsigned long *ipa = a;
4469 unsigned long *ipb = b;
4470 unsigned long t;
4471
4472 t = *ipa;
4473 *ipa = *ipb;
4474 *ipb = t;
4475 }
4476
4477 static int ftrace_process_locs(struct module *mod,
4478 unsigned long *start,
4479 unsigned long *end)
4480 {
4481 struct ftrace_page *start_pg;
4482 struct ftrace_page *pg;
4483 struct dyn_ftrace *rec;
4484 unsigned long count;
4485 unsigned long *p;
4486 unsigned long addr;
4487 unsigned long flags = 0; /* Shut up gcc */
4488 int ret = -ENOMEM;
4489
4490 count = end - start;
4491
4492 if (!count)
4493 return 0;
4494
4495 sort(start, count, sizeof(*start),
4496 ftrace_cmp_ips, ftrace_swap_ips);
4497
4498 start_pg = ftrace_allocate_pages(count);
4499 if (!start_pg)
4500 return -ENOMEM;
4501
4502 mutex_lock(&ftrace_lock);
4503
4504 /*
4505 * Core and each module needs their own pages, as
4506 * modules will free them when they are removed.
4507 * Force a new page to be allocated for modules.
4508 */
4509 if (!mod) {
4510 WARN_ON(ftrace_pages || ftrace_pages_start);
4511 /* First initialization */
4512 ftrace_pages = ftrace_pages_start = start_pg;
4513 } else {
4514 if (!ftrace_pages)
4515 goto out;
4516
4517 if (WARN_ON(ftrace_pages->next)) {
4518 /* Hmm, we have free pages? */
4519 while (ftrace_pages->next)
4520 ftrace_pages = ftrace_pages->next;
4521 }
4522
4523 ftrace_pages->next = start_pg;
4524 }
4525
4526 p = start;
4527 pg = start_pg;
4528 while (p < end) {
4529 addr = ftrace_call_adjust(*p++);
4530 /*
4531 * Some architecture linkers will pad between
4532 * the different mcount_loc sections of different
4533 * object files to satisfy alignments.
4534 * Skip any NULL pointers.
4535 */
4536 if (!addr)
4537 continue;
4538
4539 if (pg->index == pg->size) {
4540 /* We should have allocated enough */
4541 if (WARN_ON(!pg->next))
4542 break;
4543 pg = pg->next;
4544 }
4545
4546 rec = &pg->records[pg->index++];
4547 rec->ip = addr;
4548 }
4549
4550 /* We should have used all pages */
4551 WARN_ON(pg->next);
4552
4553 /* Assign the last page to ftrace_pages */
4554 ftrace_pages = pg;
4555
4556 /*
4557 * We only need to disable interrupts on start up
4558 * because we are modifying code that an interrupt
4559 * may execute, and the modification is not atomic.
4560 * But for modules, nothing runs the code we modify
4561 * until we are finished with it, and there's no
4562 * reason to cause large interrupt latencies while we do it.
4563 */
4564 if (!mod)
4565 local_irq_save(flags);
4566 ftrace_update_code(mod, start_pg);
4567 if (!mod)
4568 local_irq_restore(flags);
4569 ret = 0;
4570 out:
4571 mutex_unlock(&ftrace_lock);
4572
4573 return ret;
4574 }
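
/*
 * ftrace_call_adjust() above is arch defined. On some architectures
 * (x86, for one) no adjustment is needed, making it roughly:
 *
 *	static inline unsigned long ftrace_call_adjust(unsigned long addr)
 *	{
 *		return addr;
 *	}
 *
 * Others fix up the recorded address so that it points at the actual
 * mcount call site.
 */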
4575
4576 #ifdef CONFIG_MODULES
4577
4578 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4579
4580 void ftrace_release_mod(struct module *mod)
4581 {
4582 struct dyn_ftrace *rec;
4583 struct ftrace_page **last_pg;
4584 struct ftrace_page *pg;
4585 int order;
4586
4587 mutex_lock(&ftrace_lock);
4588
4589 if (ftrace_disabled)
4590 goto out_unlock;
4591
4592 /*
4593 * Each module has its own ftrace_pages, remove
4594 * them from the list.
4595 */
4596 last_pg = &ftrace_pages_start;
4597 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4598 rec = &pg->records[0];
4599 if (within_module_core(rec->ip, mod)) {
4600 /*
4601 * As core pages are first, the first
4602 * page should never be a module page.
4603 */
4604 if (WARN_ON(pg == ftrace_pages_start))
4605 goto out_unlock;
4606
4607 /* Check if we are deleting the last page */
4608 if (pg == ftrace_pages)
4609 ftrace_pages = next_to_ftrace_page(last_pg);
4610
4611 *last_pg = pg->next;
4612 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4613 free_pages((unsigned long)pg->records, order);
4614 kfree(pg);
4615 } else
4616 last_pg = &pg->next;
4617 }
4618 out_unlock:
4619 mutex_unlock(&ftrace_lock);
4620 }
4621
4622 static void ftrace_init_module(struct module *mod,
4623 unsigned long *start, unsigned long *end)
4624 {
4625 if (ftrace_disabled || start == end)
4626 return;
4627 ftrace_process_locs(mod, start, end);
4628 }
4629
4630 void ftrace_module_init(struct module *mod)
4631 {
4632 ftrace_init_module(mod, mod->ftrace_callsites,
4633 mod->ftrace_callsites +
4634 mod->num_ftrace_callsites);
4635 }
4636
4637 static int ftrace_module_notify_exit(struct notifier_block *self,
4638 unsigned long val, void *data)
4639 {
4640 struct module *mod = data;
4641
4642 if (val == MODULE_STATE_GOING)
4643 ftrace_release_mod(mod);
4644
4645 return 0;
4646 }
4647 #else
4648 static int ftrace_module_notify_exit(struct notifier_block *self,
4649 unsigned long val, void *data)
4650 {
4651 return 0;
4652 }
4653 #endif /* CONFIG_MODULES */
4654
4655 struct notifier_block ftrace_module_exit_nb = {
4656 .notifier_call = ftrace_module_notify_exit,
4657 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4658 };
4659
4660 void __init ftrace_init(void)
4661 {
4662 extern unsigned long __start_mcount_loc[];
4663 extern unsigned long __stop_mcount_loc[];
4664 unsigned long count, flags;
4665 int ret;
4666
4667 local_irq_save(flags);
4668 ret = ftrace_dyn_arch_init();
4669 local_irq_restore(flags);
4670 if (ret)
4671 goto failed;
4672
4673 count = __stop_mcount_loc - __start_mcount_loc;
4674 if (!count) {
4675 pr_info("ftrace: No functions to be traced?\n");
4676 goto failed;
4677 }
4678
4679 pr_info("ftrace: allocating %ld entries in %ld pages\n",
4680 count, count / ENTRIES_PER_PAGE + 1);
4681
4682 last_ftrace_enabled = ftrace_enabled = 1;
4683
4684 ret = ftrace_process_locs(NULL,
4685 __start_mcount_loc,
4686 __stop_mcount_loc);
4687
4688 ret = register_module_notifier(&ftrace_module_exit_nb);
4689 if (ret)
4690 pr_warning("Failed to register trace ftrace module exit notifier\n");
4691
4692 set_ftrace_early_filters();
4693
4694 return;
4695 failed:
4696 ftrace_disabled = 1;
4697 }
4698
4699 #else
4700
4701 static struct ftrace_ops global_ops = {
4702 .func = ftrace_stub,
4703 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4704 };
4705
4706 static int __init ftrace_nodyn_init(void)
4707 {
4708 ftrace_enabled = 1;
4709 return 0;
4710 }
4711 core_initcall(ftrace_nodyn_init);
4712
4713 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4714 static inline void ftrace_startup_enable(int command) { }
4715 static inline void ftrace_startup_all(int command) { }
4716 /* Keep as macros so we do not need to define the commands */
4717 # define ftrace_startup(ops, command) \
4718 ({ \
4719 int ___ret = __register_ftrace_function(ops); \
4720 if (!___ret) \
4721 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4722 ___ret; \
4723 })
4724 # define ftrace_shutdown(ops, command) \
4725 ({ \
4726 int ___ret = __unregister_ftrace_function(ops); \
4727 if (!___ret) \
4728 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4729 ___ret; \
4730 })
4731
4732 # define ftrace_startup_sysctl() do { } while (0)
4733 # define ftrace_shutdown_sysctl() do { } while (0)
4734
4735 static inline int
4736 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4737 {
4738 return 1;
4739 }
4740
4741 #endif /* CONFIG_DYNAMIC_FTRACE */
4742
4743 __init void ftrace_init_global_array_ops(struct trace_array *tr)
4744 {
4745 tr->ops = &global_ops;
4746 tr->ops->private = tr;
4747 }
4748
4749 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4750 {
4751 /* If we filter on pids, update to use the pid function */
4752 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4753 if (WARN_ON(tr->ops->func != ftrace_stub))
4754 printk("ftrace ops had %pS for function\n",
4755 tr->ops->func);
4756 /* Only the top level instance does pid tracing */
4757 if (!list_empty(&ftrace_pids)) {
4758 set_ftrace_pid_function(func);
4759 func = ftrace_pid_func;
4760 }
4761 }
4762 tr->ops->func = func;
4763 tr->ops->private = tr;
4764 }
4765
4766 void ftrace_reset_array_ops(struct trace_array *tr)
4767 {
4768 tr->ops->func = ftrace_stub;
4769 }
4770
4771 static void
4772 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4773 struct ftrace_ops *op, struct pt_regs *regs)
4774 {
4775 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4776 return;
4777
4778 /*
4779 * Some of the ops may be dynamically allocated,
4780 * they must be freed after a synchronize_sched().
4781 */
4782 preempt_disable_notrace();
4783 trace_recursion_set(TRACE_CONTROL_BIT);
4784
4785 /*
4786 	 * Control functions (e.g. perf) use RCU. Only trace if
4787 	 * RCU is currently watching.
4788 */
4789 if (!rcu_is_watching())
4790 goto out;
4791
4792 do_for_each_ftrace_op(op, ftrace_control_list) {
4793 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4794 !ftrace_function_local_disabled(op) &&
4795 ftrace_ops_test(op, ip, regs))
4796 op->func(ip, parent_ip, op, regs);
4797 } while_for_each_ftrace_op(op);
4798 out:
4799 trace_recursion_clear(TRACE_CONTROL_BIT);
4800 preempt_enable_notrace();
4801 }
4802
4803 static struct ftrace_ops control_ops = {
4804 .func = ftrace_ops_control_func,
4805 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4806 INIT_OPS_HASH(control_ops)
4807 };
4808
4809 static inline void
4810 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4811 struct ftrace_ops *ignored, struct pt_regs *regs)
4812 {
4813 struct ftrace_ops *op;
4814 int bit;
4815
4816 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4817 if (bit < 0)
4818 return;
4819
4820 /*
4821 * Some of the ops may be dynamically allocated,
4822 * they must be freed after a synchronize_sched().
4823 */
4824 preempt_disable_notrace();
4825 do_for_each_ftrace_op(op, ftrace_ops_list) {
4826 if (ftrace_ops_test(op, ip, regs)) {
4827 if (FTRACE_WARN_ON(!op->func)) {
4828 pr_warn("op=%p %pS\n", op, op);
4829 goto out;
4830 }
4831 op->func(ip, parent_ip, op, regs);
4832 }
4833 } while_for_each_ftrace_op(op);
4834 out:
4835 preempt_enable_notrace();
4836 trace_clear_recursion(bit);
4837 }
4838
4839 /*
4840 * Some archs only support passing ip and parent_ip. Even though
4841 * the list function ignores the op parameter, we do not want any
4842 * C side effects, where a function is called without the caller
4843 * sending a third parameter.
4844 * Archs are expected to support both regs and ftrace_ops at the
4845 * same time. If they support ftrace_ops, it is assumed they
4846 * support regs. If callbacks want to use regs, they must either
4847 * check for regs being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4848 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4849 * An architecture can pass partial regs with ftrace_ops and still
4850 * set ARCH_SUPPORTS_FTRACE_OPS.
4851 */
4852 #if ARCH_SUPPORTS_FTRACE_OPS
4853 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4854 struct ftrace_ops *op, struct pt_regs *regs)
4855 {
4856 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4857 }
4858 #else
4859 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4860 {
4861 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4862 }
4863 #endif
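
/*
 * As the comment above notes, a callback that wants regs must be
 * prepared for them to be NULL when full regs are not saved. A
 * sketch (my_callback is hypothetical):
 *
 *	static void notrace my_callback(unsigned long ip,
 *			unsigned long parent_ip,
 *			struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;
 *		...
 *	}
 */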
4864
4865 /*
4866 * If there's only one function registered but it does not support
4867 * recursion, this function will be called by the mcount trampoline.
4868 * This function will handle recursion protection.
4869 */
4870 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
4871 struct ftrace_ops *op, struct pt_regs *regs)
4872 {
4873 int bit;
4874
4875 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4876 if (bit < 0)
4877 return;
4878
4879 op->func(ip, parent_ip, op, regs);
4880
4881 trace_clear_recursion(bit);
4882 }
4883
4884 /**
4885 * ftrace_ops_get_func - get the function a trampoline should call
4886 * @ops: the ops to get the function for
4887 *
4888 * Normally the mcount trampoline will call the ops->func, but there
4889 * are times that it should not. For example, if the ops does not
4890 * have its own recursion protection, then it should call the
4891 * ftrace_ops_recurs_func() instead.
4892 *
4893 * Returns the function that the trampoline should call for @ops.
4894 */
4895 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
4896 {
4897 /*
4898 * If this is a dynamic ops or we force list func,
4899 * then it needs to call the list anyway.
4900 */
4901 if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
4902 return ftrace_ops_list_func;
4903
4904 /*
4905 * If the func handles its own recursion, call it directly.
4906 * Otherwise call the recursion protected function that
4907 * will call the ftrace ops function.
4908 */
4909 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
4910 return ftrace_ops_recurs_func;
4911
4912 return ops->func;
4913 }
4914
4915 static void clear_ftrace_swapper(void)
4916 {
4917 struct task_struct *p;
4918 int cpu;
4919
4920 get_online_cpus();
4921 for_each_online_cpu(cpu) {
4922 p = idle_task(cpu);
4923 clear_tsk_trace_trace(p);
4924 }
4925 put_online_cpus();
4926 }
4927
4928 static void set_ftrace_swapper(void)
4929 {
4930 struct task_struct *p;
4931 int cpu;
4932
4933 get_online_cpus();
4934 for_each_online_cpu(cpu) {
4935 p = idle_task(cpu);
4936 set_tsk_trace_trace(p);
4937 }
4938 put_online_cpus();
4939 }
4940
4941 static void clear_ftrace_pid(struct pid *pid)
4942 {
4943 struct task_struct *p;
4944
4945 rcu_read_lock();
4946 do_each_pid_task(pid, PIDTYPE_PID, p) {
4947 clear_tsk_trace_trace(p);
4948 } while_each_pid_task(pid, PIDTYPE_PID, p);
4949 rcu_read_unlock();
4950
4951 put_pid(pid);
4952 }
4953
4954 static void set_ftrace_pid(struct pid *pid)
4955 {
4956 struct task_struct *p;
4957
4958 rcu_read_lock();
4959 do_each_pid_task(pid, PIDTYPE_PID, p) {
4960 set_tsk_trace_trace(p);
4961 } while_each_pid_task(pid, PIDTYPE_PID, p);
4962 rcu_read_unlock();
4963 }
4964
4965 static void clear_ftrace_pid_task(struct pid *pid)
4966 {
4967 if (pid == ftrace_swapper_pid)
4968 clear_ftrace_swapper();
4969 else
4970 clear_ftrace_pid(pid);
4971 }
4972
4973 static void set_ftrace_pid_task(struct pid *pid)
4974 {
4975 if (pid == ftrace_swapper_pid)
4976 set_ftrace_swapper();
4977 else
4978 set_ftrace_pid(pid);
4979 }
4980
4981 static int ftrace_pid_add(int p)
4982 {
4983 struct pid *pid;
4984 struct ftrace_pid *fpid;
4985 int ret = -EINVAL;
4986
4987 mutex_lock(&ftrace_lock);
4988
4989 if (!p)
4990 pid = ftrace_swapper_pid;
4991 else
4992 pid = find_get_pid(p);
4993
4994 if (!pid)
4995 goto out;
4996
4997 ret = 0;
4998
4999 list_for_each_entry(fpid, &ftrace_pids, list)
5000 if (fpid->pid == pid)
5001 goto out_put;
5002
5003 ret = -ENOMEM;
5004
5005 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5006 if (!fpid)
5007 goto out_put;
5008
5009 list_add(&fpid->list, &ftrace_pids);
5010 fpid->pid = pid;
5011
5012 set_ftrace_pid_task(pid);
5013
5014 ftrace_update_pid_func();
5015
5016 ftrace_startup_all(0);
5017
5018 mutex_unlock(&ftrace_lock);
5019 return 0;
5020
5021 out_put:
5022 if (pid != ftrace_swapper_pid)
5023 put_pid(pid);
5024
5025 out:
5026 mutex_unlock(&ftrace_lock);
5027 return ret;
5028 }
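
/*
 * Note the !p case above: writing 0 selects ftrace_swapper_pid,
 * i.e. the idle tasks. From user space (a sketch):
 *
 *	echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo 123 >> /sys/kernel/debug/tracing/set_ftrace_pid
 *
 * Appending adds pids; truncating resets the list first (see
 * ftrace_pid_open() below).
 */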
5029
5030 static void ftrace_pid_reset(void)
5031 {
5032 struct ftrace_pid *fpid, *safe;
5033
5034 mutex_lock(&ftrace_lock);
5035 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5036 struct pid *pid = fpid->pid;
5037
5038 clear_ftrace_pid_task(pid);
5039
5040 list_del(&fpid->list);
5041 kfree(fpid);
5042 }
5043
5044 ftrace_update_pid_func();
5045 ftrace_startup_all(0);
5046
5047 mutex_unlock(&ftrace_lock);
5048 }
5049
5050 static void *fpid_start(struct seq_file *m, loff_t *pos)
5051 {
5052 mutex_lock(&ftrace_lock);
5053
5054 if (list_empty(&ftrace_pids) && (!*pos))
5055 return (void *) 1;
5056
5057 return seq_list_start(&ftrace_pids, *pos);
5058 }
5059
5060 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5061 {
5062 if (v == (void *)1)
5063 return NULL;
5064
5065 return seq_list_next(v, &ftrace_pids, pos);
5066 }
5067
5068 static void fpid_stop(struct seq_file *m, void *p)
5069 {
5070 mutex_unlock(&ftrace_lock);
5071 }
5072
5073 static int fpid_show(struct seq_file *m, void *v)
5074 {
5075 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5076
5077 if (v == (void *)1) {
5078 seq_printf(m, "no pid\n");
5079 return 0;
5080 }
5081
5082 if (fpid->pid == ftrace_swapper_pid)
5083 seq_printf(m, "swapper tasks\n");
5084 else
5085 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5086
5087 return 0;
5088 }
5089
5090 static const struct seq_operations ftrace_pid_sops = {
5091 .start = fpid_start,
5092 .next = fpid_next,
5093 .stop = fpid_stop,
5094 .show = fpid_show,
5095 };
5096
5097 static int
5098 ftrace_pid_open(struct inode *inode, struct file *file)
5099 {
5100 int ret = 0;
5101
5102 if ((file->f_mode & FMODE_WRITE) &&
5103 (file->f_flags & O_TRUNC))
5104 ftrace_pid_reset();
5105
5106 if (file->f_mode & FMODE_READ)
5107 ret = seq_open(file, &ftrace_pid_sops);
5108
5109 return ret;
5110 }
5111
5112 static ssize_t
5113 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5114 size_t cnt, loff_t *ppos)
5115 {
5116 char buf[64], *tmp;
5117 long val;
5118 int ret;
5119
5120 if (cnt >= sizeof(buf))
5121 return -EINVAL;
5122
5123 if (copy_from_user(&buf, ubuf, cnt))
5124 return -EFAULT;
5125
5126 buf[cnt] = 0;
5127
5128 /*
5129 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5130 * to clean the filter quietly.
5131 */
5132 tmp = strstrip(buf);
5133 if (strlen(tmp) == 0)
5134 return 1;
5135
5136 ret = kstrtol(tmp, 10, &val);
5137 if (ret < 0)
5138 return ret;
5139
5140 ret = ftrace_pid_add(val);
5141
5142 return ret ? ret : cnt;
5143 }
5144
5145 static int
5146 ftrace_pid_release(struct inode *inode, struct file *file)
5147 {
5148 if (file->f_mode & FMODE_READ)
5149 seq_release(inode, file);
5150
5151 return 0;
5152 }
5153
5154 static const struct file_operations ftrace_pid_fops = {
5155 .open = ftrace_pid_open,
5156 .write = ftrace_pid_write,
5157 .read = seq_read,
5158 .llseek = tracing_lseek,
5159 .release = ftrace_pid_release,
5160 };
5161
5162 static __init int ftrace_init_debugfs(void)
5163 {
5164 struct dentry *d_tracer;
5165
5166 d_tracer = tracing_init_dentry();
5167 if (!d_tracer)
5168 return 0;
5169
5170 ftrace_init_dyn_debugfs(d_tracer);
5171
5172 trace_create_file("set_ftrace_pid", 0644, d_tracer,
5173 NULL, &ftrace_pid_fops);
5174
5175 ftrace_profile_debugfs(d_tracer);
5176
5177 return 0;
5178 }
5179 fs_initcall(ftrace_init_debugfs);
5180
5181 /**
5182 * ftrace_kill - kill ftrace
5183 *
5184 * This function should be used by panic code. It stops ftrace
5185 * but in a not so nice way. It takes no locks, so it can be
5186 * used to kill ftrace even from an atomic section.
5187 */
5188 void ftrace_kill(void)
5189 {
5190 ftrace_disabled = 1;
5191 ftrace_enabled = 0;
5192 clear_ftrace_function();
5193 }
5194
5195 /**
5196 * ftrace_is_dead - Test if ftrace is dead or not.
5197 */
5198 int ftrace_is_dead(void)
5199 {
5200 return ftrace_disabled;
5201 }
5202
5203 /**
5204 * register_ftrace_function - register a function for profiling
5205 * @ops: ops structure that holds the function for profiling.
5206 *
5207 * Register a callback function to be called from every traced
5208 * function in the kernel.
5209 *
5210 * Note: @ops->func and all the functions it calls must be labeled
5211 * with "notrace", otherwise it will go into a
5212 * recursive loop.
5213 */
5214 int register_ftrace_function(struct ftrace_ops *ops)
5215 {
5216 int ret = -1;
5217
5218 ftrace_ops_init(ops);
5219
5220 mutex_lock(&ftrace_lock);
5221
5222 ret = ftrace_startup(ops, 0);
5223
5224 mutex_unlock(&ftrace_lock);
5225
5226 return ret;
5227 }
5228 EXPORT_SYMBOL_GPL(register_ftrace_function);
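
/*
 * Minimal usage sketch (my_callback and my_ops are hypothetical; as
 * noted above, the callback must be notrace):
 *
 *	static void notrace my_callback(unsigned long ip,
 *			unsigned long parent_ip,
 *			struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 */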
5229
5230 /**
5231 * unregister_ftrace_function - unregister a function for profiling.
5232 * @ops: ops structure that holds the function to unregister
5233 *
5234 * Unregister a function that was added to be called by ftrace profiling.
5235 */
5236 int unregister_ftrace_function(struct ftrace_ops *ops)
5237 {
5238 int ret;
5239
5240 mutex_lock(&ftrace_lock);
5241 ret = ftrace_shutdown(ops, 0);
5242 mutex_unlock(&ftrace_lock);
5243
5244 return ret;
5245 }
5246 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5247
5248 int
5249 ftrace_enable_sysctl(struct ctl_table *table, int write,
5250 void __user *buffer, size_t *lenp,
5251 loff_t *ppos)
5252 {
5253 int ret = -ENODEV;
5254
5255 mutex_lock(&ftrace_lock);
5256
5257 if (unlikely(ftrace_disabled))
5258 goto out;
5259
5260 ret = proc_dointvec(table, write, buffer, lenp, ppos);
5261
5262 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5263 goto out;
5264
5265 last_ftrace_enabled = !!ftrace_enabled;
5266
5267 if (ftrace_enabled) {
5268
5269 ftrace_startup_sysctl();
5270
5271 /* we are starting ftrace again */
5272 if (ftrace_ops_list != &ftrace_list_end)
5273 update_ftrace_function();
5274
5275 } else {
5276 /* stopping ftrace calls (just send to ftrace_stub) */
5277 ftrace_trace_function = ftrace_stub;
5278
5279 ftrace_shutdown_sysctl();
5280 }
5281
5282 out:
5283 mutex_unlock(&ftrace_lock);
5284 return ret;
5285 }
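
/*
 * This handler backs the kernel.ftrace_enabled sysctl, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 */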
5286
5287 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5288
5289 static struct ftrace_ops graph_ops = {
5290 .func = ftrace_stub,
5291 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5292 FTRACE_OPS_FL_INITIALIZED |
5293 FTRACE_OPS_FL_STUB,
5294 #ifdef FTRACE_GRAPH_TRAMP_ADDR
5295 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5296 #endif
5297 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5298 };
5299
5300 static int ftrace_graph_active;
5301
5302 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5303 {
5304 return 0;
5305 }
5306
5307 /* The callbacks that hook a function */
5308 trace_func_graph_ret_t ftrace_graph_return =
5309 (trace_func_graph_ret_t)ftrace_stub;
5310 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5311 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5312
5313 /* Try to assign a return stack to each task, FTRACE_RETSTACK_ALLOC_SIZE at a time. */
5314 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5315 {
5316 int i;
5317 int ret = 0;
5318 unsigned long flags;
5319 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5320 struct task_struct *g, *t;
5321
5322 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5323 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5324 * sizeof(struct ftrace_ret_stack),
5325 GFP_KERNEL);
5326 if (!ret_stack_list[i]) {
5327 start = 0;
5328 end = i;
5329 ret = -ENOMEM;
5330 goto free;
5331 }
5332 }
5333
5334 read_lock_irqsave(&tasklist_lock, flags);
5335 do_each_thread(g, t) {
5336 if (start == end) {
5337 ret = -EAGAIN;
5338 goto unlock;
5339 }
5340
5341 if (t->ret_stack == NULL) {
5342 atomic_set(&t->tracing_graph_pause, 0);
5343 atomic_set(&t->trace_overrun, 0);
5344 t->curr_ret_stack = -1;
5345 /* Make sure the tasks see the -1 first: */
5346 smp_wmb();
5347 t->ret_stack = ret_stack_list[start++];
5348 }
5349 } while_each_thread(g, t);
5350
5351 unlock:
5352 read_unlock_irqrestore(&tasklist_lock, flags);
5353 free:
5354 for (i = start; i < end; i++)
5355 kfree(ret_stack_list[i]);
5356 return ret;
5357 }
5358
5359 static void
5360 ftrace_graph_probe_sched_switch(void *ignore,
5361 struct task_struct *prev, struct task_struct *next)
5362 {
5363 unsigned long long timestamp;
5364 int index;
5365
5366 /*
5367 	 * Does the user want to count the time a function was asleep?
5368 * If so, do not update the time stamps.
5369 */
5370 if (trace_flags & TRACE_ITER_SLEEP_TIME)
5371 return;
5372
5373 timestamp = trace_clock_local();
5374
5375 prev->ftrace_timestamp = timestamp;
5376
5377 /* only process tasks that we timestamped */
5378 if (!next->ftrace_timestamp)
5379 return;
5380
5381 /*
5382 * Update all the counters in next to make up for the
5383 * time next was sleeping.
5384 */
5385 timestamp -= next->ftrace_timestamp;
5386
5387 for (index = next->curr_ret_stack; index >= 0; index--)
5388 next->ret_stack[index].calltime += timestamp;
5389 }
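
/*
 * The TRACE_ITER_SLEEP_TIME test above corresponds to the
 * "sleep-time" trace option. With
 *
 *	echo 0 > /sys/kernel/debug/tracing/options/sleep-time
 *
 * the probe adjusts the timestamps so that time spent sleeping is
 * not charged to the functions on the return stack.
 */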
5390
5391 /* Allocate a return stack for each task */
5392 static int start_graph_tracing(void)
5393 {
5394 struct ftrace_ret_stack **ret_stack_list;
5395 int ret, cpu;
5396
5397 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5398 sizeof(struct ftrace_ret_stack *),
5399 GFP_KERNEL);
5400
5401 if (!ret_stack_list)
5402 return -ENOMEM;
5403
5404 /* The cpu_boot init_task->ret_stack will never be freed */
5405 for_each_online_cpu(cpu) {
5406 if (!idle_task(cpu)->ret_stack)
5407 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5408 }
5409
5410 do {
5411 ret = alloc_retstack_tasklist(ret_stack_list);
5412 } while (ret == -EAGAIN);
5413
5414 if (!ret) {
5415 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5416 if (ret)
5417 pr_info("ftrace_graph: Couldn't activate tracepoint"
5418 " probe to kernel_sched_switch\n");
5419 }
5420
5421 kfree(ret_stack_list);
5422 return ret;
5423 }
5424
5425 /*
5426 * Hibernation protection.
5427 * The state of the current task is too unstable during
5428 * suspend/restore to disk. We want to protect against that.
5429 */
5430 static int
5431 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5432 void *unused)
5433 {
5434 switch (state) {
5435 case PM_HIBERNATION_PREPARE:
5436 pause_graph_tracing();
5437 break;
5438
5439 case PM_POST_HIBERNATION:
5440 unpause_graph_tracing();
5441 break;
5442 }
5443 return NOTIFY_DONE;
5444 }
5445
5446 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5447 {
5448 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5449 return 0;
5450 return __ftrace_graph_entry(trace);
5451 }
5452
5453 /*
5454 * The function graph tracer should only trace the functions defined
5455 * by set_ftrace_filter and set_ftrace_notrace. If another function
5456 * tracer ops is registered, the graph tracer must test each
5457 * function against the global ops, and not just trace any function
5458 * that any registered ftrace_ops happens to trace.
5459 */
5460 static void update_function_graph_func(void)
5461 {
5462 struct ftrace_ops *op;
5463 bool do_test = false;
5464
5465 /*
5466 * The graph and global ops share the same set of functions
5467 * to test. If any other ops is on the list, then
5468 	 * the graph tracer needs to test if it's the function
5469 * it should call.
5470 */
5471 do_for_each_ftrace_op(op, ftrace_ops_list) {
5472 if (op != &global_ops && op != &graph_ops &&
5473 op != &ftrace_list_end) {
5474 do_test = true;
5475 /* in double loop, break out with goto */
5476 goto out;
5477 }
5478 } while_for_each_ftrace_op(op);
5479 out:
5480 if (do_test)
5481 ftrace_graph_entry = ftrace_graph_entry_test;
5482 else
5483 ftrace_graph_entry = __ftrace_graph_entry;
5484 }
5485
5486 static struct notifier_block ftrace_suspend_notifier = {
5487 .notifier_call = ftrace_suspend_notifier_call,
5488 };
5489
5490 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5491 trace_func_graph_ent_t entryfunc)
5492 {
5493 int ret = 0;
5494
5495 mutex_lock(&ftrace_lock);
5496
5497 /* we currently allow only one tracer registered at a time */
5498 if (ftrace_graph_active) {
5499 ret = -EBUSY;
5500 goto out;
5501 }
5502
5503 register_pm_notifier(&ftrace_suspend_notifier);
5504
5505 ftrace_graph_active++;
5506 ret = start_graph_tracing();
5507 if (ret) {
5508 ftrace_graph_active--;
5509 goto out;
5510 }
5511
5512 ftrace_graph_return = retfunc;
5513
5514 /*
5515 * Update the indirect function to the entryfunc, and the
5516 * function that gets called to the entry_test first. Then
5517 * call the update fgraph entry function to determine if
5518 * the entryfunc should be called directly or not.
5519 */
5520 __ftrace_graph_entry = entryfunc;
5521 ftrace_graph_entry = ftrace_graph_entry_test;
5522 update_function_graph_func();
5523
5524 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5525
5526 out:
5527 mutex_unlock(&ftrace_lock);
5528 return ret;
5529 }
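
/*
 * Usage sketch (my_entry and my_return are hypothetical):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	ret = register_ftrace_graph(my_return, my_entry);
 *
 * A nonzero return from the entry handler means "trace this
 * function"; returning 0 skips it.
 */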
5530
5531 void unregister_ftrace_graph(void)
5532 {
5533 mutex_lock(&ftrace_lock);
5534
5535 if (unlikely(!ftrace_graph_active))
5536 goto out;
5537
5538 ftrace_graph_active--;
5539 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5540 ftrace_graph_entry = ftrace_graph_entry_stub;
5541 __ftrace_graph_entry = ftrace_graph_entry_stub;
5542 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5543 unregister_pm_notifier(&ftrace_suspend_notifier);
5544 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5545
5546 out:
5547 mutex_unlock(&ftrace_lock);
5548 }
5549
5550 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5551
5552 static void
5553 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5554 {
5555 atomic_set(&t->tracing_graph_pause, 0);
5556 atomic_set(&t->trace_overrun, 0);
5557 t->ftrace_timestamp = 0;
5558 /* make curr_ret_stack visible before we add the ret_stack */
5559 smp_wmb();
5560 t->ret_stack = ret_stack;
5561 }
5562
5563 /*
5564 * Allocate a return stack for the idle task. May be the first
5565 * time through, or it may be done by CPU hotplug online.
5566 */
5567 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5568 {
5569 t->curr_ret_stack = -1;
5570 /*
5571 * The idle task has no parent, it either has its own
5572 * stack or no stack at all.
5573 */
5574 if (t->ret_stack)
5575 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5576
5577 if (ftrace_graph_active) {
5578 struct ftrace_ret_stack *ret_stack;
5579
5580 ret_stack = per_cpu(idle_ret_stack, cpu);
5581 if (!ret_stack) {
5582 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5583 * sizeof(struct ftrace_ret_stack),
5584 GFP_KERNEL);
5585 if (!ret_stack)
5586 return;
5587 per_cpu(idle_ret_stack, cpu) = ret_stack;
5588 }
5589 graph_init_task(t, ret_stack);
5590 }
5591 }
5592
5593 /* Allocate a return stack for newly created task */
5594 void ftrace_graph_init_task(struct task_struct *t)
5595 {
5596 /* Make sure we do not use the parent ret_stack */
5597 t->ret_stack = NULL;
5598 t->curr_ret_stack = -1;
5599
5600 if (ftrace_graph_active) {
5601 struct ftrace_ret_stack *ret_stack;
5602
5603 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5604 * sizeof(struct ftrace_ret_stack),
5605 GFP_KERNEL);
5606 if (!ret_stack)
5607 return;
5608 graph_init_task(t, ret_stack);
5609 }
5610 }
5611
5612 void ftrace_graph_exit_task(struct task_struct *t)
5613 {
5614 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5615
5616 t->ret_stack = NULL;
5617 /* NULL must become visible to IRQs before we free it: */
5618 barrier();
5619
5620 kfree(ret_stack);
5621 }
5622 #endif