]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Infrastructure for profiling code inserted by 'gcc -pg'. | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> | |
6 | * | |
7 | * Originally ported from the -rt patch by: | |
8 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> | |
9 | * | |
10 | * Based on code in the latency_tracer, that is: | |
11 | * | |
12 | * Copyright (C) 2004-2006 Ingo Molnar | |
13 | * Copyright (C) 2004 Nadia Yvette Chambers | |
14 | */ | |
15 | ||
16 | #include <linux/stop_machine.h> | |
17 | #include <linux/clocksource.h> | |
18 | #include <linux/kallsyms.h> | |
19 | #include <linux/seq_file.h> | |
20 | #include <linux/suspend.h> | |
21 | #include <linux/debugfs.h> | |
22 | #include <linux/hardirq.h> | |
23 | #include <linux/kthread.h> | |
24 | #include <linux/uaccess.h> | |
25 | #include <linux/bsearch.h> | |
26 | #include <linux/module.h> | |
27 | #include <linux/ftrace.h> | |
28 | #include <linux/sysctl.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/ctype.h> | |
31 | #include <linux/sort.h> | |
32 | #include <linux/list.h> | |
33 | #include <linux/hash.h> | |
34 | #include <linux/rcupdate.h> | |
35 | ||
36 | #include <trace/events/sched.h> | |
37 | ||
38 | #include <asm/setup.h> | |
39 | ||
40 | #include "trace_output.h" | |
41 | #include "trace_stat.h" | |
42 | ||
43 | #define FTRACE_WARN_ON(cond) \ | |
44 | ({ \ | |
45 | int ___r = cond; \ | |
46 | if (WARN_ON(___r)) \ | |
47 | ftrace_kill(); \ | |
48 | ___r; \ | |
49 | }) | |
50 | ||
51 | #define FTRACE_WARN_ON_ONCE(cond) \ | |
52 | ({ \ | |
53 | int ___r = cond; \ | |
54 | if (WARN_ON_ONCE(___r)) \ | |
55 | ftrace_kill(); \ | |
56 | ___r; \ | |
57 | }) | |
58 | ||
59 | /* hash bits for specific function selection */ | |
60 | #define FTRACE_HASH_BITS 7 | |
61 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | |
62 | #define FTRACE_HASH_DEFAULT_BITS 10 | |
63 | #define FTRACE_HASH_MAX_BITS 12 | |
64 | ||
65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) | |
66 | ||
67 | #ifdef CONFIG_DYNAMIC_FTRACE | |
68 | #define INIT_OPS_HASH(opsname) \ | |
69 | .func_hash = &opsname.local_hash, \ | |
70 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | |
71 | #define ASSIGN_OPS_HASH(opsname, val) \ | |
72 | .func_hash = val, \ | |
73 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | |
74 | #else | |
75 | #define INIT_OPS_HASH(opsname) | |
76 | #define ASSIGN_OPS_HASH(opsname, val) | |
77 | #endif | |
78 | ||
79 | static struct ftrace_ops ftrace_list_end __read_mostly = { | |
80 | .func = ftrace_stub, | |
81 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, | |
82 | INIT_OPS_HASH(ftrace_list_end) | |
83 | }; | |
84 | ||
85 | /* ftrace_enabled is a method to turn ftrace on or off */ | |
86 | int ftrace_enabled __read_mostly; | |
87 | static int last_ftrace_enabled; | |
88 | ||
89 | /* Current function tracing op */ | |
90 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; | |
91 | /* What to set function_trace_op to */ | |
92 | static struct ftrace_ops *set_function_trace_op; | |
93 | ||
94 | /* List for set_ftrace_pid's pids. */ | |
95 | LIST_HEAD(ftrace_pids); | |
96 | struct ftrace_pid { | |
97 | struct list_head list; | |
98 | struct pid *pid; | |
99 | }; | |
100 | ||
101 | /* | |
102 | * ftrace_disabled is set when an anomaly is discovered. | |
103 | * ftrace_disabled is much stronger than ftrace_enabled. | |
104 | */ | |
105 | static int ftrace_disabled __read_mostly; | |
106 | ||
107 | static DEFINE_MUTEX(ftrace_lock); | |
108 | ||
109 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; | |
110 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | |
111 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | |
112 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | |
113 | static struct ftrace_ops global_ops; | |
114 | static struct ftrace_ops control_ops; | |
115 | ||
116 | static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | |
117 | struct ftrace_ops *op, struct pt_regs *regs); | |
118 | ||
119 | #if ARCH_SUPPORTS_FTRACE_OPS | |
120 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |
121 | struct ftrace_ops *op, struct pt_regs *regs); | |
122 | #else | |
123 | /* See comment below, where ftrace_ops_list_func is defined */ | |
124 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); | |
125 | #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) | |
126 | #endif | |
127 | ||
128 | /* | |
129 | * Traverse the ftrace_global_list, invoking all entries. The reason that we | |
130 | * can use rcu_dereference_raw_notrace() is that elements removed from this list | |
131 | * are simply leaked, so there is no need to interact with a grace-period | |
132 | * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle | |
133 | * concurrent insertions into the ftrace_global_list. | |
134 | * | |
135 | * Silly Alpha and silly pointer-speculation compiler optimizations! | |
136 | */ | |
137 | #define do_for_each_ftrace_op(op, list) \ | |
138 | op = rcu_dereference_raw_notrace(list); \ | |
139 | do | |
140 | ||
141 | /* | |
142 | * Optimized for just a single item in the list (as that is the normal case). | |
143 | */ | |
144 | #define while_for_each_ftrace_op(op) \ | |
145 | while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \ | |
146 | unlikely((op) != &ftrace_list_end)) | |
147 | ||
148 | static inline void ftrace_ops_init(struct ftrace_ops *ops) | |
149 | { | |
150 | #ifdef CONFIG_DYNAMIC_FTRACE | |
151 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { | |
152 | mutex_init(&ops->local_hash.regex_lock); | |
153 | ops->func_hash = &ops->local_hash; | |
154 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; | |
155 | } | |
156 | #endif | |
157 | } | |
158 | ||
159 | /** | |
160 | * ftrace_nr_registered_ops - return number of ops registered | |
161 | * | |
162 | * Returns the number of ftrace_ops registered and tracing functions | |
163 | */ | |
164 | int ftrace_nr_registered_ops(void) | |
165 | { | |
166 | struct ftrace_ops *ops; | |
167 | int cnt = 0; | |
168 | ||
169 | mutex_lock(&ftrace_lock); | |
170 | ||
171 | for (ops = ftrace_ops_list; | |
172 | ops != &ftrace_list_end; ops = ops->next) | |
173 | cnt++; | |
174 | ||
175 | mutex_unlock(&ftrace_lock); | |
176 | ||
177 | return cnt; | |
178 | } | |
179 | ||
180 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, | |
181 | struct ftrace_ops *op, struct pt_regs *regs) | |
182 | { | |
183 | if (!test_tsk_trace_trace(current)) | |
184 | return; | |
185 | ||
186 | ftrace_pid_function(ip, parent_ip, op, regs); | |
187 | } | |
188 | ||
189 | static void set_ftrace_pid_function(ftrace_func_t func) | |
190 | { | |
191 | /* do not set ftrace_pid_function to itself! */ | |
192 | if (func != ftrace_pid_func) | |
193 | ftrace_pid_function = func; | |
194 | } | |
195 | ||
196 | /** | |
197 | * clear_ftrace_function - reset the ftrace function | |
198 | * | |
199 | * This NULLs the ftrace function and in essence stops | |
200 | * tracing. There may be lag | |
201 | */ | |
202 | void clear_ftrace_function(void) | |
203 | { | |
204 | ftrace_trace_function = ftrace_stub; | |
205 | ftrace_pid_function = ftrace_stub; | |
206 | } | |
207 | ||
208 | static void control_ops_disable_all(struct ftrace_ops *ops) | |
209 | { | |
210 | int cpu; | |
211 | ||
212 | for_each_possible_cpu(cpu) | |
213 | *per_cpu_ptr(ops->disabled, cpu) = 1; | |
214 | } | |
215 | ||
216 | static int control_ops_alloc(struct ftrace_ops *ops) | |
217 | { | |
218 | int __percpu *disabled; | |
219 | ||
220 | disabled = alloc_percpu(int); | |
221 | if (!disabled) | |
222 | return -ENOMEM; | |
223 | ||
224 | ops->disabled = disabled; | |
225 | control_ops_disable_all(ops); | |
226 | return 0; | |
227 | } | |
228 | ||
229 | static void ftrace_sync(struct work_struct *work) | |
230 | { | |
231 | /* | |
232 | * This function is just a stub to implement a hard force | |
233 | * of synchronize_sched(). This requires synchronizing | |
234 | * tasks even in userspace and idle. | |
235 | * | |
236 | * Yes, function tracing is rude. | |
237 | */ | |
238 | } | |
239 | ||
240 | static void ftrace_sync_ipi(void *data) | |
241 | { | |
242 | /* Probably not needed, but do it anyway */ | |
243 | smp_rmb(); | |
244 | } | |
245 | ||
246 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
247 | static void update_function_graph_func(void); | |
248 | #else | |
249 | static inline void update_function_graph_func(void) { } | |
250 | #endif | |
251 | ||
252 | static void update_ftrace_function(void) | |
253 | { | |
254 | ftrace_func_t func; | |
255 | ||
256 | /* | |
257 | * If we are at the end of the list and this ops is | |
258 | * recursion safe and not dynamic and the arch supports passing ops, | |
259 | * then have the mcount trampoline call the function directly. | |
260 | */ | |
261 | if (ftrace_ops_list == &ftrace_list_end || | |
262 | (ftrace_ops_list->next == &ftrace_list_end && | |
263 | !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) && | |
264 | !FTRACE_FORCE_LIST_FUNC)) { | |
265 | /* Set the ftrace_ops that the arch callback uses */ | |
266 | set_function_trace_op = ftrace_ops_list; | |
267 | /* | |
268 | * If the func handles its own recursion, call it directly. | |
269 | * Otherwise call the recursion protected function that | |
270 | * will call the ftrace ops function. | |
271 | */ | |
272 | if (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) | |
273 | func = ftrace_ops_list->func; | |
274 | else | |
275 | func = ftrace_ops_recurs_func; | |
276 | } else { | |
277 | /* Just use the default ftrace_ops */ | |
278 | set_function_trace_op = &ftrace_list_end; | |
279 | func = ftrace_ops_list_func; | |
280 | } | |
281 | ||
282 | update_function_graph_func(); | |
283 | ||
284 | /* If there's no change, then do nothing more here */ | |
285 | if (ftrace_trace_function == func) | |
286 | return; | |
287 | ||
288 | /* | |
289 | * If we are using the list function, it doesn't care | |
290 | * about the function_trace_ops. | |
291 | */ | |
292 | if (func == ftrace_ops_list_func) { | |
293 | ftrace_trace_function = func; | |
294 | /* | |
295 | * Don't even bother setting function_trace_ops, | |
296 | * it would be racy to do so anyway. | |
297 | */ | |
298 | return; | |
299 | } | |
300 | ||
301 | #ifndef CONFIG_DYNAMIC_FTRACE | |
302 | /* | |
303 | * For static tracing, we need to be a bit more careful. | |
304 | * The function change takes affect immediately. Thus, | |
305 | * we need to coorditate the setting of the function_trace_ops | |
306 | * with the setting of the ftrace_trace_function. | |
307 | * | |
308 | * Set the function to the list ops, which will call the | |
309 | * function we want, albeit indirectly, but it handles the | |
310 | * ftrace_ops and doesn't depend on function_trace_op. | |
311 | */ | |
312 | ftrace_trace_function = ftrace_ops_list_func; | |
313 | /* | |
314 | * Make sure all CPUs see this. Yes this is slow, but static | |
315 | * tracing is slow and nasty to have enabled. | |
316 | */ | |
317 | schedule_on_each_cpu(ftrace_sync); | |
318 | /* Now all cpus are using the list ops. */ | |
319 | function_trace_op = set_function_trace_op; | |
320 | /* Make sure the function_trace_op is visible on all CPUs */ | |
321 | smp_wmb(); | |
322 | /* Nasty way to force a rmb on all cpus */ | |
323 | smp_call_function(ftrace_sync_ipi, NULL, 1); | |
324 | /* OK, we are all set to update the ftrace_trace_function now! */ | |
325 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | |
326 | ||
327 | ftrace_trace_function = func; | |
328 | } | |
329 | ||
330 | int using_ftrace_ops_list_func(void) | |
331 | { | |
332 | return ftrace_trace_function == ftrace_ops_list_func; | |
333 | } | |
334 | ||
335 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | |
336 | { | |
337 | ops->next = *list; | |
338 | /* | |
339 | * We are entering ops into the list but another | |
340 | * CPU might be walking that list. We need to make sure | |
341 | * the ops->next pointer is valid before another CPU sees | |
342 | * the ops pointer included into the list. | |
343 | */ | |
344 | rcu_assign_pointer(*list, ops); | |
345 | } | |
346 | ||
347 | static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | |
348 | { | |
349 | struct ftrace_ops **p; | |
350 | ||
351 | /* | |
352 | * If we are removing the last function, then simply point | |
353 | * to the ftrace_stub. | |
354 | */ | |
355 | if (*list == ops && ops->next == &ftrace_list_end) { | |
356 | *list = &ftrace_list_end; | |
357 | return 0; | |
358 | } | |
359 | ||
360 | for (p = list; *p != &ftrace_list_end; p = &(*p)->next) | |
361 | if (*p == ops) | |
362 | break; | |
363 | ||
364 | if (*p != ops) | |
365 | return -1; | |
366 | ||
367 | *p = (*p)->next; | |
368 | return 0; | |
369 | } | |
370 | ||
371 | static void add_ftrace_list_ops(struct ftrace_ops **list, | |
372 | struct ftrace_ops *main_ops, | |
373 | struct ftrace_ops *ops) | |
374 | { | |
375 | int first = *list == &ftrace_list_end; | |
376 | add_ftrace_ops(list, ops); | |
377 | if (first) | |
378 | add_ftrace_ops(&ftrace_ops_list, main_ops); | |
379 | } | |
380 | ||
381 | static int remove_ftrace_list_ops(struct ftrace_ops **list, | |
382 | struct ftrace_ops *main_ops, | |
383 | struct ftrace_ops *ops) | |
384 | { | |
385 | int ret = remove_ftrace_ops(list, ops); | |
386 | if (!ret && *list == &ftrace_list_end) | |
387 | ret = remove_ftrace_ops(&ftrace_ops_list, main_ops); | |
388 | return ret; | |
389 | } | |
390 | ||
391 | static int __register_ftrace_function(struct ftrace_ops *ops) | |
392 | { | |
393 | if (ops->flags & FTRACE_OPS_FL_DELETED) | |
394 | return -EINVAL; | |
395 | ||
396 | if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
397 | return -EBUSY; | |
398 | ||
399 | #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS | |
400 | /* | |
401 | * If the ftrace_ops specifies SAVE_REGS, then it only can be used | |
402 | * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. | |
403 | * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant. | |
404 | */ | |
405 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && | |
406 | !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) | |
407 | return -EINVAL; | |
408 | ||
409 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) | |
410 | ops->flags |= FTRACE_OPS_FL_SAVE_REGS; | |
411 | #endif | |
412 | ||
413 | if (!core_kernel_data((unsigned long)ops)) | |
414 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; | |
415 | ||
416 | if (ops->flags & FTRACE_OPS_FL_CONTROL) { | |
417 | if (control_ops_alloc(ops)) | |
418 | return -ENOMEM; | |
419 | add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); | |
420 | } else | |
421 | add_ftrace_ops(&ftrace_ops_list, ops); | |
422 | ||
423 | if (ftrace_enabled) | |
424 | update_ftrace_function(); | |
425 | ||
426 | return 0; | |
427 | } | |
428 | ||
429 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | |
430 | { | |
431 | int ret; | |
432 | ||
433 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) | |
434 | return -EBUSY; | |
435 | ||
436 | if (ops->flags & FTRACE_OPS_FL_CONTROL) { | |
437 | ret = remove_ftrace_list_ops(&ftrace_control_list, | |
438 | &control_ops, ops); | |
439 | } else | |
440 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); | |
441 | ||
442 | if (ret < 0) | |
443 | return ret; | |
444 | ||
445 | if (ftrace_enabled) | |
446 | update_ftrace_function(); | |
447 | ||
448 | return 0; | |
449 | } | |
450 | ||
451 | static void ftrace_update_pid_func(void) | |
452 | { | |
453 | /* Only do something if we are tracing something */ | |
454 | if (ftrace_trace_function == ftrace_stub) | |
455 | return; | |
456 | ||
457 | update_ftrace_function(); | |
458 | } | |
459 | ||
460 | #ifdef CONFIG_FUNCTION_PROFILER | |
461 | struct ftrace_profile { | |
462 | struct hlist_node node; | |
463 | unsigned long ip; | |
464 | unsigned long counter; | |
465 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
466 | unsigned long long time; | |
467 | unsigned long long time_squared; | |
468 | #endif | |
469 | }; | |
470 | ||
471 | struct ftrace_profile_page { | |
472 | struct ftrace_profile_page *next; | |
473 | unsigned long index; | |
474 | struct ftrace_profile records[]; | |
475 | }; | |
476 | ||
477 | struct ftrace_profile_stat { | |
478 | atomic_t disabled; | |
479 | struct hlist_head *hash; | |
480 | struct ftrace_profile_page *pages; | |
481 | struct ftrace_profile_page *start; | |
482 | struct tracer_stat stat; | |
483 | }; | |
484 | ||
485 | #define PROFILE_RECORDS_SIZE \ | |
486 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) | |
487 | ||
488 | #define PROFILES_PER_PAGE \ | |
489 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) | |
490 | ||
491 | static int ftrace_profile_enabled __read_mostly; | |
492 | ||
493 | /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ | |
494 | static DEFINE_MUTEX(ftrace_profile_lock); | |
495 | ||
496 | static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); | |
497 | ||
498 | #define FTRACE_PROFILE_HASH_BITS 10 | |
499 | #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS) | |
500 | ||
501 | static void * | |
502 | function_stat_next(void *v, int idx) | |
503 | { | |
504 | struct ftrace_profile *rec = v; | |
505 | struct ftrace_profile_page *pg; | |
506 | ||
507 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); | |
508 | ||
509 | again: | |
510 | if (idx != 0) | |
511 | rec++; | |
512 | ||
513 | if ((void *)rec >= (void *)&pg->records[pg->index]) { | |
514 | pg = pg->next; | |
515 | if (!pg) | |
516 | return NULL; | |
517 | rec = &pg->records[0]; | |
518 | if (!rec->counter) | |
519 | goto again; | |
520 | } | |
521 | ||
522 | return rec; | |
523 | } | |
524 | ||
525 | static void *function_stat_start(struct tracer_stat *trace) | |
526 | { | |
527 | struct ftrace_profile_stat *stat = | |
528 | container_of(trace, struct ftrace_profile_stat, stat); | |
529 | ||
530 | if (!stat || !stat->start) | |
531 | return NULL; | |
532 | ||
533 | return function_stat_next(&stat->start->records[0], 0); | |
534 | } | |
535 | ||
536 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
537 | /* function graph compares on total time */ | |
538 | static int function_stat_cmp(void *p1, void *p2) | |
539 | { | |
540 | struct ftrace_profile *a = p1; | |
541 | struct ftrace_profile *b = p2; | |
542 | ||
543 | if (a->time < b->time) | |
544 | return -1; | |
545 | if (a->time > b->time) | |
546 | return 1; | |
547 | else | |
548 | return 0; | |
549 | } | |
550 | #else | |
551 | /* not function graph compares against hits */ | |
552 | static int function_stat_cmp(void *p1, void *p2) | |
553 | { | |
554 | struct ftrace_profile *a = p1; | |
555 | struct ftrace_profile *b = p2; | |
556 | ||
557 | if (a->counter < b->counter) | |
558 | return -1; | |
559 | if (a->counter > b->counter) | |
560 | return 1; | |
561 | else | |
562 | return 0; | |
563 | } | |
564 | #endif | |
565 | ||
566 | static int function_stat_headers(struct seq_file *m) | |
567 | { | |
568 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
569 | seq_printf(m, " Function " | |
570 | "Hit Time Avg s^2\n" | |
571 | " -------- " | |
572 | "--- ---- --- ---\n"); | |
573 | #else | |
574 | seq_printf(m, " Function Hit\n" | |
575 | " -------- ---\n"); | |
576 | #endif | |
577 | return 0; | |
578 | } | |
579 | ||
580 | static int function_stat_show(struct seq_file *m, void *v) | |
581 | { | |
582 | struct ftrace_profile *rec = v; | |
583 | char str[KSYM_SYMBOL_LEN]; | |
584 | int ret = 0; | |
585 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
586 | static struct trace_seq s; | |
587 | unsigned long long avg; | |
588 | unsigned long long stddev; | |
589 | #endif | |
590 | mutex_lock(&ftrace_profile_lock); | |
591 | ||
592 | /* we raced with function_profile_reset() */ | |
593 | if (unlikely(rec->counter == 0)) { | |
594 | ret = -EBUSY; | |
595 | goto out; | |
596 | } | |
597 | ||
598 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | |
599 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | |
600 | ||
601 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
602 | seq_printf(m, " "); | |
603 | avg = rec->time; | |
604 | do_div(avg, rec->counter); | |
605 | ||
606 | /* Sample standard deviation (s^2) */ | |
607 | if (rec->counter <= 1) | |
608 | stddev = 0; | |
609 | else { | |
610 | /* | |
611 | * Apply Welford's method: | |
612 | * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) | |
613 | */ | |
614 | stddev = rec->counter * rec->time_squared - | |
615 | rec->time * rec->time; | |
616 | ||
617 | /* | |
618 | * Divide only 1000 for ns^2 -> us^2 conversion. | |
619 | * trace_print_graph_duration will divide 1000 again. | |
620 | */ | |
621 | do_div(stddev, rec->counter * (rec->counter - 1) * 1000); | |
622 | } | |
623 | ||
624 | trace_seq_init(&s); | |
625 | trace_print_graph_duration(rec->time, &s); | |
626 | trace_seq_puts(&s, " "); | |
627 | trace_print_graph_duration(avg, &s); | |
628 | trace_seq_puts(&s, " "); | |
629 | trace_print_graph_duration(stddev, &s); | |
630 | trace_print_seq(m, &s); | |
631 | #endif | |
632 | seq_putc(m, '\n'); | |
633 | out: | |
634 | mutex_unlock(&ftrace_profile_lock); | |
635 | ||
636 | return ret; | |
637 | } | |
638 | ||
639 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | |
640 | { | |
641 | struct ftrace_profile_page *pg; | |
642 | ||
643 | pg = stat->pages = stat->start; | |
644 | ||
645 | while (pg) { | |
646 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); | |
647 | pg->index = 0; | |
648 | pg = pg->next; | |
649 | } | |
650 | ||
651 | memset(stat->hash, 0, | |
652 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); | |
653 | } | |
654 | ||
655 | int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) | |
656 | { | |
657 | struct ftrace_profile_page *pg; | |
658 | int functions; | |
659 | int pages; | |
660 | int i; | |
661 | ||
662 | /* If we already allocated, do nothing */ | |
663 | if (stat->pages) | |
664 | return 0; | |
665 | ||
666 | stat->pages = (void *)get_zeroed_page(GFP_KERNEL); | |
667 | if (!stat->pages) | |
668 | return -ENOMEM; | |
669 | ||
670 | #ifdef CONFIG_DYNAMIC_FTRACE | |
671 | functions = ftrace_update_tot_cnt; | |
672 | #else | |
673 | /* | |
674 | * We do not know the number of functions that exist because | |
675 | * dynamic tracing is what counts them. With past experience | |
676 | * we have around 20K functions. That should be more than enough. | |
677 | * It is highly unlikely we will execute every function in | |
678 | * the kernel. | |
679 | */ | |
680 | functions = 20000; | |
681 | #endif | |
682 | ||
683 | pg = stat->start = stat->pages; | |
684 | ||
685 | pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); | |
686 | ||
687 | for (i = 1; i < pages; i++) { | |
688 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | |
689 | if (!pg->next) | |
690 | goto out_free; | |
691 | pg = pg->next; | |
692 | } | |
693 | ||
694 | return 0; | |
695 | ||
696 | out_free: | |
697 | pg = stat->start; | |
698 | while (pg) { | |
699 | unsigned long tmp = (unsigned long)pg; | |
700 | ||
701 | pg = pg->next; | |
702 | free_page(tmp); | |
703 | } | |
704 | ||
705 | stat->pages = NULL; | |
706 | stat->start = NULL; | |
707 | ||
708 | return -ENOMEM; | |
709 | } | |
710 | ||
711 | static int ftrace_profile_init_cpu(int cpu) | |
712 | { | |
713 | struct ftrace_profile_stat *stat; | |
714 | int size; | |
715 | ||
716 | stat = &per_cpu(ftrace_profile_stats, cpu); | |
717 | ||
718 | if (stat->hash) { | |
719 | /* If the profile is already created, simply reset it */ | |
720 | ftrace_profile_reset(stat); | |
721 | return 0; | |
722 | } | |
723 | ||
724 | /* | |
725 | * We are profiling all functions, but usually only a few thousand | |
726 | * functions are hit. We'll make a hash of 1024 items. | |
727 | */ | |
728 | size = FTRACE_PROFILE_HASH_SIZE; | |
729 | ||
730 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); | |
731 | ||
732 | if (!stat->hash) | |
733 | return -ENOMEM; | |
734 | ||
735 | /* Preallocate the function profiling pages */ | |
736 | if (ftrace_profile_pages_init(stat) < 0) { | |
737 | kfree(stat->hash); | |
738 | stat->hash = NULL; | |
739 | return -ENOMEM; | |
740 | } | |
741 | ||
742 | return 0; | |
743 | } | |
744 | ||
745 | static int ftrace_profile_init(void) | |
746 | { | |
747 | int cpu; | |
748 | int ret = 0; | |
749 | ||
750 | for_each_possible_cpu(cpu) { | |
751 | ret = ftrace_profile_init_cpu(cpu); | |
752 | if (ret) | |
753 | break; | |
754 | } | |
755 | ||
756 | return ret; | |
757 | } | |
758 | ||
759 | /* interrupts must be disabled */ | |
760 | static struct ftrace_profile * | |
761 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) | |
762 | { | |
763 | struct ftrace_profile *rec; | |
764 | struct hlist_head *hhd; | |
765 | unsigned long key; | |
766 | ||
767 | key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); | |
768 | hhd = &stat->hash[key]; | |
769 | ||
770 | if (hlist_empty(hhd)) | |
771 | return NULL; | |
772 | ||
773 | hlist_for_each_entry_rcu_notrace(rec, hhd, node) { | |
774 | if (rec->ip == ip) | |
775 | return rec; | |
776 | } | |
777 | ||
778 | return NULL; | |
779 | } | |
780 | ||
781 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, | |
782 | struct ftrace_profile *rec) | |
783 | { | |
784 | unsigned long key; | |
785 | ||
786 | key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); | |
787 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); | |
788 | } | |
789 | ||
790 | /* | |
791 | * The memory is already allocated, this simply finds a new record to use. | |
792 | */ | |
793 | static struct ftrace_profile * | |
794 | ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) | |
795 | { | |
796 | struct ftrace_profile *rec = NULL; | |
797 | ||
798 | /* prevent recursion (from NMIs) */ | |
799 | if (atomic_inc_return(&stat->disabled) != 1) | |
800 | goto out; | |
801 | ||
802 | /* | |
803 | * Try to find the function again since an NMI | |
804 | * could have added it | |
805 | */ | |
806 | rec = ftrace_find_profiled_func(stat, ip); | |
807 | if (rec) | |
808 | goto out; | |
809 | ||
810 | if (stat->pages->index == PROFILES_PER_PAGE) { | |
811 | if (!stat->pages->next) | |
812 | goto out; | |
813 | stat->pages = stat->pages->next; | |
814 | } | |
815 | ||
816 | rec = &stat->pages->records[stat->pages->index++]; | |
817 | rec->ip = ip; | |
818 | ftrace_add_profile(stat, rec); | |
819 | ||
820 | out: | |
821 | atomic_dec(&stat->disabled); | |
822 | ||
823 | return rec; | |
824 | } | |
825 | ||
826 | static void | |
827 | function_profile_call(unsigned long ip, unsigned long parent_ip, | |
828 | struct ftrace_ops *ops, struct pt_regs *regs) | |
829 | { | |
830 | struct ftrace_profile_stat *stat; | |
831 | struct ftrace_profile *rec; | |
832 | unsigned long flags; | |
833 | ||
834 | if (!ftrace_profile_enabled) | |
835 | return; | |
836 | ||
837 | local_irq_save(flags); | |
838 | ||
839 | stat = this_cpu_ptr(&ftrace_profile_stats); | |
840 | if (!stat->hash || !ftrace_profile_enabled) | |
841 | goto out; | |
842 | ||
843 | rec = ftrace_find_profiled_func(stat, ip); | |
844 | if (!rec) { | |
845 | rec = ftrace_profile_alloc(stat, ip); | |
846 | if (!rec) | |
847 | goto out; | |
848 | } | |
849 | ||
850 | rec->counter++; | |
851 | out: | |
852 | local_irq_restore(flags); | |
853 | } | |
854 | ||
855 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
856 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | |
857 | { | |
858 | function_profile_call(trace->func, 0, NULL, NULL); | |
859 | return 1; | |
860 | } | |
861 | ||
862 | static void profile_graph_return(struct ftrace_graph_ret *trace) | |
863 | { | |
864 | struct ftrace_profile_stat *stat; | |
865 | unsigned long long calltime; | |
866 | struct ftrace_profile *rec; | |
867 | unsigned long flags; | |
868 | ||
869 | local_irq_save(flags); | |
870 | stat = this_cpu_ptr(&ftrace_profile_stats); | |
871 | if (!stat->hash || !ftrace_profile_enabled) | |
872 | goto out; | |
873 | ||
874 | /* If the calltime was zero'd ignore it */ | |
875 | if (!trace->calltime) | |
876 | goto out; | |
877 | ||
878 | calltime = trace->rettime - trace->calltime; | |
879 | ||
880 | if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) { | |
881 | int index; | |
882 | ||
883 | index = trace->depth; | |
884 | ||
885 | /* Append this call time to the parent time to subtract */ | |
886 | if (index) | |
887 | current->ret_stack[index - 1].subtime += calltime; | |
888 | ||
889 | if (current->ret_stack[index].subtime < calltime) | |
890 | calltime -= current->ret_stack[index].subtime; | |
891 | else | |
892 | calltime = 0; | |
893 | } | |
894 | ||
895 | rec = ftrace_find_profiled_func(stat, trace->func); | |
896 | if (rec) { | |
897 | rec->time += calltime; | |
898 | rec->time_squared += calltime * calltime; | |
899 | } | |
900 | ||
901 | out: | |
902 | local_irq_restore(flags); | |
903 | } | |
904 | ||
905 | static int register_ftrace_profiler(void) | |
906 | { | |
907 | return register_ftrace_graph(&profile_graph_return, | |
908 | &profile_graph_entry); | |
909 | } | |
910 | ||
911 | static void unregister_ftrace_profiler(void) | |
912 | { | |
913 | unregister_ftrace_graph(); | |
914 | } | |
915 | #else | |
916 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { | |
917 | .func = function_profile_call, | |
918 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | |
919 | INIT_OPS_HASH(ftrace_profile_ops) | |
920 | }; | |
921 | ||
922 | static int register_ftrace_profiler(void) | |
923 | { | |
924 | return register_ftrace_function(&ftrace_profile_ops); | |
925 | } | |
926 | ||
927 | static void unregister_ftrace_profiler(void) | |
928 | { | |
929 | unregister_ftrace_function(&ftrace_profile_ops); | |
930 | } | |
931 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
932 | ||
933 | static ssize_t | |
934 | ftrace_profile_write(struct file *filp, const char __user *ubuf, | |
935 | size_t cnt, loff_t *ppos) | |
936 | { | |
937 | unsigned long val; | |
938 | int ret; | |
939 | ||
940 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | |
941 | if (ret) | |
942 | return ret; | |
943 | ||
944 | val = !!val; | |
945 | ||
946 | mutex_lock(&ftrace_profile_lock); | |
947 | if (ftrace_profile_enabled ^ val) { | |
948 | if (val) { | |
949 | ret = ftrace_profile_init(); | |
950 | if (ret < 0) { | |
951 | cnt = ret; | |
952 | goto out; | |
953 | } | |
954 | ||
955 | ret = register_ftrace_profiler(); | |
956 | if (ret < 0) { | |
957 | cnt = ret; | |
958 | goto out; | |
959 | } | |
960 | ftrace_profile_enabled = 1; | |
961 | } else { | |
962 | ftrace_profile_enabled = 0; | |
963 | /* | |
964 | * unregister_ftrace_profiler calls stop_machine | |
965 | * so this acts like an synchronize_sched. | |
966 | */ | |
967 | unregister_ftrace_profiler(); | |
968 | } | |
969 | } | |
970 | out: | |
971 | mutex_unlock(&ftrace_profile_lock); | |
972 | ||
973 | *ppos += cnt; | |
974 | ||
975 | return cnt; | |
976 | } | |
977 | ||
978 | static ssize_t | |
979 | ftrace_profile_read(struct file *filp, char __user *ubuf, | |
980 | size_t cnt, loff_t *ppos) | |
981 | { | |
982 | char buf[64]; /* big enough to hold a number */ | |
983 | int r; | |
984 | ||
985 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | |
986 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | |
987 | } | |
988 | ||
989 | static const struct file_operations ftrace_profile_fops = { | |
990 | .open = tracing_open_generic, | |
991 | .read = ftrace_profile_read, | |
992 | .write = ftrace_profile_write, | |
993 | .llseek = default_llseek, | |
994 | }; | |
995 | ||
996 | /* used to initialize the real stat files */ | |
997 | static struct tracer_stat function_stats __initdata = { | |
998 | .name = "functions", | |
999 | .stat_start = function_stat_start, | |
1000 | .stat_next = function_stat_next, | |
1001 | .stat_cmp = function_stat_cmp, | |
1002 | .stat_headers = function_stat_headers, | |
1003 | .stat_show = function_stat_show | |
1004 | }; | |
1005 | ||
1006 | static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |
1007 | { | |
1008 | struct ftrace_profile_stat *stat; | |
1009 | struct dentry *entry; | |
1010 | char *name; | |
1011 | int ret; | |
1012 | int cpu; | |
1013 | ||
1014 | for_each_possible_cpu(cpu) { | |
1015 | stat = &per_cpu(ftrace_profile_stats, cpu); | |
1016 | ||
1017 | /* allocate enough for function name + cpu number */ | |
1018 | name = kmalloc(32, GFP_KERNEL); | |
1019 | if (!name) { | |
1020 | /* | |
1021 | * The files created are permanent, if something happens | |
1022 | * we still do not free memory. | |
1023 | */ | |
1024 | WARN(1, | |
1025 | "Could not allocate stat file for cpu %d\n", | |
1026 | cpu); | |
1027 | return; | |
1028 | } | |
1029 | stat->stat = function_stats; | |
1030 | snprintf(name, 32, "function%d", cpu); | |
1031 | stat->stat.name = name; | |
1032 | ret = register_stat_tracer(&stat->stat); | |
1033 | if (ret) { | |
1034 | WARN(1, | |
1035 | "Could not register function stat for cpu %d\n", | |
1036 | cpu); | |
1037 | kfree(name); | |
1038 | return; | |
1039 | } | |
1040 | } | |
1041 | ||
1042 | entry = debugfs_create_file("function_profile_enabled", 0644, | |
1043 | d_tracer, NULL, &ftrace_profile_fops); | |
1044 | if (!entry) | |
1045 | pr_warning("Could not create debugfs " | |
1046 | "'function_profile_enabled' entry\n"); | |
1047 | } | |
1048 | ||
1049 | #else /* CONFIG_FUNCTION_PROFILER */ | |
1050 | static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |
1051 | { | |
1052 | } | |
1053 | #endif /* CONFIG_FUNCTION_PROFILER */ | |
1054 | ||
1055 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | |
1056 | ||
1057 | #ifdef CONFIG_DYNAMIC_FTRACE | |
1058 | ||
1059 | static struct ftrace_ops *removed_ops; | |
1060 | ||
1061 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | |
1062 | # error Dynamic ftrace depends on MCOUNT_RECORD | |
1063 | #endif | |
1064 | ||
1065 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; | |
1066 | ||
1067 | struct ftrace_func_probe { | |
1068 | struct hlist_node node; | |
1069 | struct ftrace_probe_ops *ops; | |
1070 | unsigned long flags; | |
1071 | unsigned long ip; | |
1072 | void *data; | |
1073 | struct list_head free_list; | |
1074 | }; | |
1075 | ||
1076 | struct ftrace_func_entry { | |
1077 | struct hlist_node hlist; | |
1078 | unsigned long ip; | |
1079 | }; | |
1080 | ||
1081 | struct ftrace_hash { | |
1082 | unsigned long size_bits; | |
1083 | struct hlist_head *buckets; | |
1084 | unsigned long count; | |
1085 | struct rcu_head rcu; | |
1086 | }; | |
1087 | ||
1088 | /* | |
1089 | * We make these constant because no one should touch them, | |
1090 | * but they are used as the default "empty hash", to avoid allocating | |
1091 | * it all the time. These are in a read only section such that if | |
1092 | * anyone does try to modify it, it will cause an exception. | |
1093 | */ | |
1094 | static const struct hlist_head empty_buckets[1]; | |
1095 | static const struct ftrace_hash empty_hash = { | |
1096 | .buckets = (struct hlist_head *)empty_buckets, | |
1097 | }; | |
1098 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | |
1099 | ||
1100 | static struct ftrace_ops global_ops = { | |
1101 | .func = ftrace_stub, | |
1102 | .local_hash.notrace_hash = EMPTY_HASH, | |
1103 | .local_hash.filter_hash = EMPTY_HASH, | |
1104 | INIT_OPS_HASH(global_ops) | |
1105 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | |
1106 | FTRACE_OPS_FL_INITIALIZED, | |
1107 | }; | |
1108 | ||
1109 | struct ftrace_page { | |
1110 | struct ftrace_page *next; | |
1111 | struct dyn_ftrace *records; | |
1112 | int index; | |
1113 | int size; | |
1114 | }; | |
1115 | ||
1116 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) | |
1117 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) | |
1118 | ||
1119 | /* estimate from running different kernels */ | |
1120 | #define NR_TO_INIT 10000 | |
1121 | ||
1122 | static struct ftrace_page *ftrace_pages_start; | |
1123 | static struct ftrace_page *ftrace_pages; | |
1124 | ||
1125 | static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash) | |
1126 | { | |
1127 | return !hash || !hash->count; | |
1128 | } | |
1129 | ||
1130 | static struct ftrace_func_entry * | |
1131 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) | |
1132 | { | |
1133 | unsigned long key; | |
1134 | struct ftrace_func_entry *entry; | |
1135 | struct hlist_head *hhd; | |
1136 | ||
1137 | if (ftrace_hash_empty(hash)) | |
1138 | return NULL; | |
1139 | ||
1140 | if (hash->size_bits > 0) | |
1141 | key = hash_long(ip, hash->size_bits); | |
1142 | else | |
1143 | key = 0; | |
1144 | ||
1145 | hhd = &hash->buckets[key]; | |
1146 | ||
1147 | hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { | |
1148 | if (entry->ip == ip) | |
1149 | return entry; | |
1150 | } | |
1151 | return NULL; | |
1152 | } | |
1153 | ||
1154 | static void __add_hash_entry(struct ftrace_hash *hash, | |
1155 | struct ftrace_func_entry *entry) | |
1156 | { | |
1157 | struct hlist_head *hhd; | |
1158 | unsigned long key; | |
1159 | ||
1160 | if (hash->size_bits) | |
1161 | key = hash_long(entry->ip, hash->size_bits); | |
1162 | else | |
1163 | key = 0; | |
1164 | ||
1165 | hhd = &hash->buckets[key]; | |
1166 | hlist_add_head(&entry->hlist, hhd); | |
1167 | hash->count++; | |
1168 | } | |
1169 | ||
1170 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | |
1171 | { | |
1172 | struct ftrace_func_entry *entry; | |
1173 | ||
1174 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | |
1175 | if (!entry) | |
1176 | return -ENOMEM; | |
1177 | ||
1178 | entry->ip = ip; | |
1179 | __add_hash_entry(hash, entry); | |
1180 | ||
1181 | return 0; | |
1182 | } | |
1183 | ||
1184 | static void | |
1185 | free_hash_entry(struct ftrace_hash *hash, | |
1186 | struct ftrace_func_entry *entry) | |
1187 | { | |
1188 | hlist_del(&entry->hlist); | |
1189 | kfree(entry); | |
1190 | hash->count--; | |
1191 | } | |
1192 | ||
1193 | static void | |
1194 | remove_hash_entry(struct ftrace_hash *hash, | |
1195 | struct ftrace_func_entry *entry) | |
1196 | { | |
1197 | hlist_del(&entry->hlist); | |
1198 | hash->count--; | |
1199 | } | |
1200 | ||
1201 | static void ftrace_hash_clear(struct ftrace_hash *hash) | |
1202 | { | |
1203 | struct hlist_head *hhd; | |
1204 | struct hlist_node *tn; | |
1205 | struct ftrace_func_entry *entry; | |
1206 | int size = 1 << hash->size_bits; | |
1207 | int i; | |
1208 | ||
1209 | if (!hash->count) | |
1210 | return; | |
1211 | ||
1212 | for (i = 0; i < size; i++) { | |
1213 | hhd = &hash->buckets[i]; | |
1214 | hlist_for_each_entry_safe(entry, tn, hhd, hlist) | |
1215 | free_hash_entry(hash, entry); | |
1216 | } | |
1217 | FTRACE_WARN_ON(hash->count); | |
1218 | } | |
1219 | ||
1220 | static void free_ftrace_hash(struct ftrace_hash *hash) | |
1221 | { | |
1222 | if (!hash || hash == EMPTY_HASH) | |
1223 | return; | |
1224 | ftrace_hash_clear(hash); | |
1225 | kfree(hash->buckets); | |
1226 | kfree(hash); | |
1227 | } | |
1228 | ||
1229 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) | |
1230 | { | |
1231 | struct ftrace_hash *hash; | |
1232 | ||
1233 | hash = container_of(rcu, struct ftrace_hash, rcu); | |
1234 | free_ftrace_hash(hash); | |
1235 | } | |
1236 | ||
1237 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | |
1238 | { | |
1239 | if (!hash || hash == EMPTY_HASH) | |
1240 | return; | |
1241 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | |
1242 | } | |
1243 | ||
1244 | void ftrace_free_filter(struct ftrace_ops *ops) | |
1245 | { | |
1246 | ftrace_ops_init(ops); | |
1247 | free_ftrace_hash(ops->func_hash->filter_hash); | |
1248 | free_ftrace_hash(ops->func_hash->notrace_hash); | |
1249 | } | |
1250 | ||
1251 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | |
1252 | { | |
1253 | struct ftrace_hash *hash; | |
1254 | int size; | |
1255 | ||
1256 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); | |
1257 | if (!hash) | |
1258 | return NULL; | |
1259 | ||
1260 | size = 1 << size_bits; | |
1261 | hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); | |
1262 | ||
1263 | if (!hash->buckets) { | |
1264 | kfree(hash); | |
1265 | return NULL; | |
1266 | } | |
1267 | ||
1268 | hash->size_bits = size_bits; | |
1269 | ||
1270 | return hash; | |
1271 | } | |
1272 | ||
1273 | static struct ftrace_hash * | |
1274 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | |
1275 | { | |
1276 | struct ftrace_func_entry *entry; | |
1277 | struct ftrace_hash *new_hash; | |
1278 | int size; | |
1279 | int ret; | |
1280 | int i; | |
1281 | ||
1282 | new_hash = alloc_ftrace_hash(size_bits); | |
1283 | if (!new_hash) | |
1284 | return NULL; | |
1285 | ||
1286 | /* Empty hash? */ | |
1287 | if (ftrace_hash_empty(hash)) | |
1288 | return new_hash; | |
1289 | ||
1290 | size = 1 << hash->size_bits; | |
1291 | for (i = 0; i < size; i++) { | |
1292 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
1293 | ret = add_hash_entry(new_hash, entry->ip); | |
1294 | if (ret < 0) | |
1295 | goto free_hash; | |
1296 | } | |
1297 | } | |
1298 | ||
1299 | FTRACE_WARN_ON(new_hash->count != hash->count); | |
1300 | ||
1301 | return new_hash; | |
1302 | ||
1303 | free_hash: | |
1304 | free_ftrace_hash(new_hash); | |
1305 | return NULL; | |
1306 | } | |
1307 | ||
1308 | static void | |
1309 | ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); | |
1310 | static void | |
1311 | ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); | |
1312 | ||
1313 | static int | |
1314 | ftrace_hash_move(struct ftrace_ops *ops, int enable, | |
1315 | struct ftrace_hash **dst, struct ftrace_hash *src) | |
1316 | { | |
1317 | struct ftrace_func_entry *entry; | |
1318 | struct hlist_node *tn; | |
1319 | struct hlist_head *hhd; | |
1320 | struct ftrace_hash *old_hash; | |
1321 | struct ftrace_hash *new_hash; | |
1322 | int size = src->count; | |
1323 | int bits = 0; | |
1324 | int i; | |
1325 | ||
1326 | /* | |
1327 | * If the new source is empty, just free dst and assign it | |
1328 | * the empty_hash. | |
1329 | */ | |
1330 | if (!src->count) { | |
1331 | new_hash = EMPTY_HASH; | |
1332 | goto update; | |
1333 | } | |
1334 | ||
1335 | /* | |
1336 | * Make the hash size about 1/2 the # found | |
1337 | */ | |
1338 | for (size /= 2; size; size >>= 1) | |
1339 | bits++; | |
1340 | ||
1341 | /* Don't allocate too much */ | |
1342 | if (bits > FTRACE_HASH_MAX_BITS) | |
1343 | bits = FTRACE_HASH_MAX_BITS; | |
1344 | ||
1345 | new_hash = alloc_ftrace_hash(bits); | |
1346 | if (!new_hash) | |
1347 | return -ENOMEM; | |
1348 | ||
1349 | size = 1 << src->size_bits; | |
1350 | for (i = 0; i < size; i++) { | |
1351 | hhd = &src->buckets[i]; | |
1352 | hlist_for_each_entry_safe(entry, tn, hhd, hlist) { | |
1353 | remove_hash_entry(src, entry); | |
1354 | __add_hash_entry(new_hash, entry); | |
1355 | } | |
1356 | } | |
1357 | ||
1358 | update: | |
1359 | /* | |
1360 | * Remove the current set, update the hash and add | |
1361 | * them back. | |
1362 | */ | |
1363 | ftrace_hash_rec_disable_modify(ops, enable); | |
1364 | ||
1365 | old_hash = *dst; | |
1366 | rcu_assign_pointer(*dst, new_hash); | |
1367 | free_ftrace_hash_rcu(old_hash); | |
1368 | ||
1369 | ftrace_hash_rec_enable_modify(ops, enable); | |
1370 | ||
1371 | return 0; | |
1372 | } | |
1373 | ||
1374 | /* | |
1375 | * Test the hashes for this ops to see if we want to call | |
1376 | * the ops->func or not. | |
1377 | * | |
1378 | * It's a match if the ip is in the ops->filter_hash or | |
1379 | * the filter_hash does not exist or is empty, | |
1380 | * AND | |
1381 | * the ip is not in the ops->notrace_hash. | |
1382 | * | |
1383 | * This needs to be called with preemption disabled as | |
1384 | * the hashes are freed with call_rcu_sched(). | |
1385 | */ | |
1386 | static int | |
1387 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |
1388 | { | |
1389 | struct ftrace_hash *filter_hash; | |
1390 | struct ftrace_hash *notrace_hash; | |
1391 | int ret; | |
1392 | ||
1393 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | |
1394 | /* | |
1395 | * There's a small race when adding ops that the ftrace handler | |
1396 | * that wants regs, may be called without them. We can not | |
1397 | * allow that handler to be called if regs is NULL. | |
1398 | */ | |
1399 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) | |
1400 | return 0; | |
1401 | #endif | |
1402 | ||
1403 | filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash); | |
1404 | notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash); | |
1405 | ||
1406 | if ((ftrace_hash_empty(filter_hash) || | |
1407 | ftrace_lookup_ip(filter_hash, ip)) && | |
1408 | (ftrace_hash_empty(notrace_hash) || | |
1409 | !ftrace_lookup_ip(notrace_hash, ip))) | |
1410 | ret = 1; | |
1411 | else | |
1412 | ret = 0; | |
1413 | ||
1414 | return ret; | |
1415 | } | |
1416 | ||
1417 | /* | |
1418 | * This is a double for. Do not use 'break' to break out of the loop, | |
1419 | * you must use a goto. | |
1420 | */ | |
1421 | #define do_for_each_ftrace_rec(pg, rec) \ | |
1422 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | |
1423 | int _____i; \ | |
1424 | for (_____i = 0; _____i < pg->index; _____i++) { \ | |
1425 | rec = &pg->records[_____i]; | |
1426 | ||
1427 | #define while_for_each_ftrace_rec() \ | |
1428 | } \ | |
1429 | } | |
1430 | ||
1431 | ||
1432 | static int ftrace_cmp_recs(const void *a, const void *b) | |
1433 | { | |
1434 | const struct dyn_ftrace *key = a; | |
1435 | const struct dyn_ftrace *rec = b; | |
1436 | ||
1437 | if (key->flags < rec->ip) | |
1438 | return -1; | |
1439 | if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) | |
1440 | return 1; | |
1441 | return 0; | |
1442 | } | |
1443 | ||
1444 | static unsigned long ftrace_location_range(unsigned long start, unsigned long end) | |
1445 | { | |
1446 | struct ftrace_page *pg; | |
1447 | struct dyn_ftrace *rec; | |
1448 | struct dyn_ftrace key; | |
1449 | ||
1450 | key.ip = start; | |
1451 | key.flags = end; /* overload flags, as it is unsigned long */ | |
1452 | ||
1453 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | |
1454 | if (end < pg->records[0].ip || | |
1455 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) | |
1456 | continue; | |
1457 | rec = bsearch(&key, pg->records, pg->index, | |
1458 | sizeof(struct dyn_ftrace), | |
1459 | ftrace_cmp_recs); | |
1460 | if (rec) | |
1461 | return rec->ip; | |
1462 | } | |
1463 | ||
1464 | return 0; | |
1465 | } | |
1466 | ||
1467 | /** | |
1468 | * ftrace_location - return true if the ip giving is a traced location | |
1469 | * @ip: the instruction pointer to check | |
1470 | * | |
1471 | * Returns rec->ip if @ip given is a pointer to a ftrace location. | |
1472 | * That is, the instruction that is either a NOP or call to | |
1473 | * the function tracer. It checks the ftrace internal tables to | |
1474 | * determine if the address belongs or not. | |
1475 | */ | |
1476 | unsigned long ftrace_location(unsigned long ip) | |
1477 | { | |
1478 | return ftrace_location_range(ip, ip); | |
1479 | } | |
1480 | ||
1481 | /** | |
1482 | * ftrace_text_reserved - return true if range contains an ftrace location | |
1483 | * @start: start of range to search | |
1484 | * @end: end of range to search (inclusive). @end points to the last byte to check. | |
1485 | * | |
1486 | * Returns 1 if @start and @end contains a ftrace location. | |
1487 | * That is, the instruction that is either a NOP or call to | |
1488 | * the function tracer. It checks the ftrace internal tables to | |
1489 | * determine if the address belongs or not. | |
1490 | */ | |
1491 | int ftrace_text_reserved(const void *start, const void *end) | |
1492 | { | |
1493 | unsigned long ret; | |
1494 | ||
1495 | ret = ftrace_location_range((unsigned long)start, | |
1496 | (unsigned long)end); | |
1497 | ||
1498 | return (int)!!ret; | |
1499 | } | |
1500 | ||
1501 | /* Test if ops registered to this rec needs regs */ | |
1502 | static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) | |
1503 | { | |
1504 | struct ftrace_ops *ops; | |
1505 | bool keep_regs = false; | |
1506 | ||
1507 | for (ops = ftrace_ops_list; | |
1508 | ops != &ftrace_list_end; ops = ops->next) { | |
1509 | /* pass rec in as regs to have non-NULL val */ | |
1510 | if (ftrace_ops_test(ops, rec->ip, rec)) { | |
1511 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | |
1512 | keep_regs = true; | |
1513 | break; | |
1514 | } | |
1515 | } | |
1516 | } | |
1517 | ||
1518 | return keep_regs; | |
1519 | } | |
1520 | ||
1521 | static void ftrace_remove_tramp(struct ftrace_ops *ops, | |
1522 | struct dyn_ftrace *rec) | |
1523 | { | |
1524 | /* If TRAMP is not set, no ops should have a trampoline for this */ | |
1525 | if (!(rec->flags & FTRACE_FL_TRAMP)) | |
1526 | return; | |
1527 | ||
1528 | rec->flags &= ~FTRACE_FL_TRAMP; | |
1529 | ||
1530 | if ((!ftrace_hash_empty(ops->func_hash->filter_hash) && | |
1531 | !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) || | |
1532 | ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) | |
1533 | return; | |
1534 | /* | |
1535 | * The tramp_hash entry will be removed at time | |
1536 | * of update. | |
1537 | */ | |
1538 | ops->nr_trampolines--; | |
1539 | } | |
1540 | ||
1541 | static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops) | |
1542 | { | |
1543 | struct ftrace_ops *op; | |
1544 | ||
1545 | /* If TRAMP is not set, no ops should have a trampoline for this */ | |
1546 | if (!(rec->flags & FTRACE_FL_TRAMP)) | |
1547 | return; | |
1548 | ||
1549 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
1550 | /* | |
1551 | * This function is called to clear other tramps | |
1552 | * not the one that is being updated. | |
1553 | */ | |
1554 | if (op == ops) | |
1555 | continue; | |
1556 | if (op->nr_trampolines) | |
1557 | ftrace_remove_tramp(op, rec); | |
1558 | } while_for_each_ftrace_op(op); | |
1559 | } | |
1560 | ||
1561 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |
1562 | int filter_hash, | |
1563 | bool inc) | |
1564 | { | |
1565 | struct ftrace_hash *hash; | |
1566 | struct ftrace_hash *other_hash; | |
1567 | struct ftrace_page *pg; | |
1568 | struct dyn_ftrace *rec; | |
1569 | int count = 0; | |
1570 | int all = 0; | |
1571 | ||
1572 | /* Only update if the ops has been registered */ | |
1573 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
1574 | return; | |
1575 | ||
1576 | /* | |
1577 | * In the filter_hash case: | |
1578 | * If the count is zero, we update all records. | |
1579 | * Otherwise we just update the items in the hash. | |
1580 | * | |
1581 | * In the notrace_hash case: | |
1582 | * We enable the update in the hash. | |
1583 | * As disabling notrace means enabling the tracing, | |
1584 | * and enabling notrace means disabling, the inc variable | |
1585 | * gets inversed. | |
1586 | */ | |
1587 | if (filter_hash) { | |
1588 | hash = ops->func_hash->filter_hash; | |
1589 | other_hash = ops->func_hash->notrace_hash; | |
1590 | if (ftrace_hash_empty(hash)) | |
1591 | all = 1; | |
1592 | } else { | |
1593 | inc = !inc; | |
1594 | hash = ops->func_hash->notrace_hash; | |
1595 | other_hash = ops->func_hash->filter_hash; | |
1596 | /* | |
1597 | * If the notrace hash has no items, | |
1598 | * then there's nothing to do. | |
1599 | */ | |
1600 | if (ftrace_hash_empty(hash)) | |
1601 | return; | |
1602 | } | |
1603 | ||
1604 | do_for_each_ftrace_rec(pg, rec) { | |
1605 | int in_other_hash = 0; | |
1606 | int in_hash = 0; | |
1607 | int match = 0; | |
1608 | ||
1609 | if (all) { | |
1610 | /* | |
1611 | * Only the filter_hash affects all records. | |
1612 | * Update if the record is not in the notrace hash. | |
1613 | */ | |
1614 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) | |
1615 | match = 1; | |
1616 | } else { | |
1617 | in_hash = !!ftrace_lookup_ip(hash, rec->ip); | |
1618 | in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); | |
1619 | ||
1620 | /* | |
1621 | * If filter_hash is set, we want to match all functions | |
1622 | * that are in the hash but not in the other hash. | |
1623 | * | |
1624 | * If filter_hash is not set, then we are decrementing. | |
1625 | * That means we match anything that is in the hash | |
1626 | * and also in the other_hash. That is, we need to turn | |
1627 | * off functions in the other hash because they are disabled | |
1628 | * by this hash. | |
1629 | */ | |
1630 | if (filter_hash && in_hash && !in_other_hash) | |
1631 | match = 1; | |
1632 | else if (!filter_hash && in_hash && | |
1633 | (in_other_hash || ftrace_hash_empty(other_hash))) | |
1634 | match = 1; | |
1635 | } | |
1636 | if (!match) | |
1637 | continue; | |
1638 | ||
1639 | if (inc) { | |
1640 | rec->flags++; | |
1641 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) | |
1642 | return; | |
1643 | ||
1644 | /* | |
1645 | * If there's only a single callback registered to a | |
1646 | * function, and the ops has a trampoline registered | |
1647 | * for it, then we can call it directly. | |
1648 | */ | |
1649 | if (ftrace_rec_count(rec) == 1 && ops->trampoline) { | |
1650 | rec->flags |= FTRACE_FL_TRAMP; | |
1651 | ops->nr_trampolines++; | |
1652 | } else { | |
1653 | /* | |
1654 | * If we are adding another function callback | |
1655 | * to this function, and the previous had a | |
1656 | * custom trampoline in use, then we need to go | |
1657 | * back to the default trampoline. | |
1658 | */ | |
1659 | ftrace_clear_tramps(rec, ops); | |
1660 | } | |
1661 | ||
1662 | /* | |
1663 | * If any ops wants regs saved for this function | |
1664 | * then all ops will get saved regs. | |
1665 | */ | |
1666 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | |
1667 | rec->flags |= FTRACE_FL_REGS; | |
1668 | } else { | |
1669 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) | |
1670 | return; | |
1671 | rec->flags--; | |
1672 | ||
1673 | if (ops->trampoline && !ftrace_rec_count(rec)) | |
1674 | ftrace_remove_tramp(ops, rec); | |
1675 | ||
1676 | /* | |
1677 | * If the rec had REGS enabled and the ops that is | |
1678 | * being removed had REGS set, then see if there is | |
1679 | * still any ops for this record that wants regs. | |
1680 | * If not, we can stop recording them. | |
1681 | */ | |
1682 | if (ftrace_rec_count(rec) > 0 && | |
1683 | rec->flags & FTRACE_FL_REGS && | |
1684 | ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | |
1685 | if (!test_rec_ops_needs_regs(rec)) | |
1686 | rec->flags &= ~FTRACE_FL_REGS; | |
1687 | } | |
1688 | ||
1689 | /* | |
1690 | * flags will be cleared in ftrace_check_record() | |
1691 | * if rec count is zero. | |
1692 | */ | |
1693 | } | |
1694 | count++; | |
1695 | /* Shortcut, if we handled all records, we are done. */ | |
1696 | if (!all && count == hash->count) | |
1697 | return; | |
1698 | } while_for_each_ftrace_rec(); | |
1699 | } | |
1700 | ||
1701 | static void ftrace_hash_rec_disable(struct ftrace_ops *ops, | |
1702 | int filter_hash) | |
1703 | { | |
1704 | __ftrace_hash_rec_update(ops, filter_hash, 0); | |
1705 | } | |
1706 | ||
1707 | static void ftrace_hash_rec_enable(struct ftrace_ops *ops, | |
1708 | int filter_hash) | |
1709 | { | |
1710 | __ftrace_hash_rec_update(ops, filter_hash, 1); | |
1711 | } | |
1712 | ||
1713 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, | |
1714 | int filter_hash, int inc) | |
1715 | { | |
1716 | struct ftrace_ops *op; | |
1717 | ||
1718 | __ftrace_hash_rec_update(ops, filter_hash, inc); | |
1719 | ||
1720 | if (ops->func_hash != &global_ops.local_hash) | |
1721 | return; | |
1722 | ||
1723 | /* | |
1724 | * If the ops shares the global_ops hash, then we need to update | |
1725 | * all ops that are enabled and use this hash. | |
1726 | */ | |
1727 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
1728 | /* Already done */ | |
1729 | if (op == ops) | |
1730 | continue; | |
1731 | if (op->func_hash == &global_ops.local_hash) | |
1732 | __ftrace_hash_rec_update(op, filter_hash, inc); | |
1733 | } while_for_each_ftrace_op(op); | |
1734 | } | |
1735 | ||
1736 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, | |
1737 | int filter_hash) | |
1738 | { | |
1739 | ftrace_hash_rec_update_modify(ops, filter_hash, 0); | |
1740 | } | |
1741 | ||
1742 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, | |
1743 | int filter_hash) | |
1744 | { | |
1745 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); | |
1746 | } | |
1747 | ||
1748 | static void print_ip_ins(const char *fmt, unsigned char *p) | |
1749 | { | |
1750 | int i; | |
1751 | ||
1752 | printk(KERN_CONT "%s", fmt); | |
1753 | ||
1754 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | |
1755 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | |
1756 | } | |
1757 | ||
1758 | /** | |
1759 | * ftrace_bug - report and shutdown function tracer | |
1760 | * @failed: The failed type (EFAULT, EINVAL, EPERM) | |
1761 | * @ip: The address that failed | |
1762 | * | |
1763 | * The arch code that enables or disables the function tracing | |
1764 | * can call ftrace_bug() when it has detected a problem in | |
1765 | * modifying the code. @failed should be one of either: | |
1766 | * EFAULT - if the problem happens on reading the @ip address | |
1767 | * EINVAL - if what is read at @ip is not what was expected | |
1768 | * EPERM - if the problem happens on writting to the @ip address | |
1769 | */ | |
1770 | void ftrace_bug(int failed, unsigned long ip) | |
1771 | { | |
1772 | switch (failed) { | |
1773 | case -EFAULT: | |
1774 | FTRACE_WARN_ON_ONCE(1); | |
1775 | pr_info("ftrace faulted on modifying "); | |
1776 | print_ip_sym(ip); | |
1777 | break; | |
1778 | case -EINVAL: | |
1779 | FTRACE_WARN_ON_ONCE(1); | |
1780 | pr_info("ftrace failed to modify "); | |
1781 | print_ip_sym(ip); | |
1782 | print_ip_ins(" actual: ", (unsigned char *)ip); | |
1783 | printk(KERN_CONT "\n"); | |
1784 | break; | |
1785 | case -EPERM: | |
1786 | FTRACE_WARN_ON_ONCE(1); | |
1787 | pr_info("ftrace faulted on writing "); | |
1788 | print_ip_sym(ip); | |
1789 | break; | |
1790 | default: | |
1791 | FTRACE_WARN_ON_ONCE(1); | |
1792 | pr_info("ftrace faulted on unknown error "); | |
1793 | print_ip_sym(ip); | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |
1798 | { | |
1799 | unsigned long flag = 0UL; | |
1800 | ||
1801 | /* | |
1802 | * If we are updating calls: | |
1803 | * | |
1804 | * If the record has a ref count, then we need to enable it | |
1805 | * because someone is using it. | |
1806 | * | |
1807 | * Otherwise we make sure its disabled. | |
1808 | * | |
1809 | * If we are disabling calls, then disable all records that | |
1810 | * are enabled. | |
1811 | */ | |
1812 | if (enable && ftrace_rec_count(rec)) | |
1813 | flag = FTRACE_FL_ENABLED; | |
1814 | ||
1815 | /* | |
1816 | * If enabling and the REGS flag does not match the REGS_EN, or | |
1817 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore | |
1818 | * this record. Set flags to fail the compare against ENABLED. | |
1819 | */ | |
1820 | if (flag) { | |
1821 | if (!(rec->flags & FTRACE_FL_REGS) != | |
1822 | !(rec->flags & FTRACE_FL_REGS_EN)) | |
1823 | flag |= FTRACE_FL_REGS; | |
1824 | ||
1825 | if (!(rec->flags & FTRACE_FL_TRAMP) != | |
1826 | !(rec->flags & FTRACE_FL_TRAMP_EN)) | |
1827 | flag |= FTRACE_FL_TRAMP; | |
1828 | } | |
1829 | ||
1830 | /* If the state of this record hasn't changed, then do nothing */ | |
1831 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | |
1832 | return FTRACE_UPDATE_IGNORE; | |
1833 | ||
1834 | if (flag) { | |
1835 | /* Save off if rec is being enabled (for return value) */ | |
1836 | flag ^= rec->flags & FTRACE_FL_ENABLED; | |
1837 | ||
1838 | if (update) { | |
1839 | rec->flags |= FTRACE_FL_ENABLED; | |
1840 | if (flag & FTRACE_FL_REGS) { | |
1841 | if (rec->flags & FTRACE_FL_REGS) | |
1842 | rec->flags |= FTRACE_FL_REGS_EN; | |
1843 | else | |
1844 | rec->flags &= ~FTRACE_FL_REGS_EN; | |
1845 | } | |
1846 | if (flag & FTRACE_FL_TRAMP) { | |
1847 | if (rec->flags & FTRACE_FL_TRAMP) | |
1848 | rec->flags |= FTRACE_FL_TRAMP_EN; | |
1849 | else | |
1850 | rec->flags &= ~FTRACE_FL_TRAMP_EN; | |
1851 | } | |
1852 | } | |
1853 | ||
1854 | /* | |
1855 | * If this record is being updated from a nop, then | |
1856 | * return UPDATE_MAKE_CALL. | |
1857 | * Otherwise, | |
1858 | * return UPDATE_MODIFY_CALL to tell the caller to convert | |
1859 | * from the save regs, to a non-save regs function or | |
1860 | * vice versa, or from a trampoline call. | |
1861 | */ | |
1862 | if (flag & FTRACE_FL_ENABLED) | |
1863 | return FTRACE_UPDATE_MAKE_CALL; | |
1864 | ||
1865 | return FTRACE_UPDATE_MODIFY_CALL; | |
1866 | } | |
1867 | ||
1868 | if (update) { | |
1869 | /* If there's no more users, clear all flags */ | |
1870 | if (!ftrace_rec_count(rec)) | |
1871 | rec->flags = 0; | |
1872 | else | |
1873 | /* Just disable the record (keep REGS state) */ | |
1874 | rec->flags &= ~FTRACE_FL_ENABLED; | |
1875 | } | |
1876 | ||
1877 | return FTRACE_UPDATE_MAKE_NOP; | |
1878 | } | |
1879 | ||
1880 | /** | |
1881 | * ftrace_update_record, set a record that now is tracing or not | |
1882 | * @rec: the record to update | |
1883 | * @enable: set to 1 if the record is tracing, zero to force disable | |
1884 | * | |
1885 | * The records that represent all functions that can be traced need | |
1886 | * to be updated when tracing has been enabled. | |
1887 | */ | |
1888 | int ftrace_update_record(struct dyn_ftrace *rec, int enable) | |
1889 | { | |
1890 | return ftrace_check_record(rec, enable, 1); | |
1891 | } | |
1892 | ||
1893 | /** | |
1894 | * ftrace_test_record, check if the record has been enabled or not | |
1895 | * @rec: the record to test | |
1896 | * @enable: set to 1 to check if enabled, 0 if it is disabled | |
1897 | * | |
1898 | * The arch code may need to test if a record is already set to | |
1899 | * tracing to determine how to modify the function code that it | |
1900 | * represents. | |
1901 | */ | |
1902 | int ftrace_test_record(struct dyn_ftrace *rec, int enable) | |
1903 | { | |
1904 | return ftrace_check_record(rec, enable, 0); | |
1905 | } | |
1906 | ||
1907 | static struct ftrace_ops * | |
1908 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) | |
1909 | { | |
1910 | struct ftrace_ops *op; | |
1911 | ||
1912 | /* Removed ops need to be tested first */ | |
1913 | if (removed_ops && removed_ops->tramp_hash) { | |
1914 | if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip)) | |
1915 | return removed_ops; | |
1916 | } | |
1917 | ||
1918 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
1919 | if (!op->tramp_hash) | |
1920 | continue; | |
1921 | ||
1922 | if (ftrace_lookup_ip(op->tramp_hash, rec->ip)) | |
1923 | return op; | |
1924 | ||
1925 | } while_for_each_ftrace_op(op); | |
1926 | ||
1927 | return NULL; | |
1928 | } | |
1929 | ||
1930 | static struct ftrace_ops * | |
1931 | ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) | |
1932 | { | |
1933 | struct ftrace_ops *op; | |
1934 | ||
1935 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
1936 | /* pass rec in as regs to have non-NULL val */ | |
1937 | if (ftrace_ops_test(op, rec->ip, rec)) | |
1938 | return op; | |
1939 | } while_for_each_ftrace_op(op); | |
1940 | ||
1941 | return NULL; | |
1942 | } | |
1943 | ||
1944 | /** | |
1945 | * ftrace_get_addr_new - Get the call address to set to | |
1946 | * @rec: The ftrace record descriptor | |
1947 | * | |
1948 | * If the record has the FTRACE_FL_REGS set, that means that it | |
1949 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS | |
1950 | * is not not set, then it wants to convert to the normal callback. | |
1951 | * | |
1952 | * Returns the address of the trampoline to set to | |
1953 | */ | |
1954 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | |
1955 | { | |
1956 | struct ftrace_ops *ops; | |
1957 | ||
1958 | /* Trampolines take precedence over regs */ | |
1959 | if (rec->flags & FTRACE_FL_TRAMP) { | |
1960 | ops = ftrace_find_tramp_ops_new(rec); | |
1961 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { | |
1962 | pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", | |
1963 | (void *)rec->ip, (void *)rec->ip, rec->flags); | |
1964 | /* Ftrace is shutting down, return anything */ | |
1965 | return (unsigned long)FTRACE_ADDR; | |
1966 | } | |
1967 | return ops->trampoline; | |
1968 | } | |
1969 | ||
1970 | if (rec->flags & FTRACE_FL_REGS) | |
1971 | return (unsigned long)FTRACE_REGS_ADDR; | |
1972 | else | |
1973 | return (unsigned long)FTRACE_ADDR; | |
1974 | } | |
1975 | ||
1976 | /** | |
1977 | * ftrace_get_addr_curr - Get the call address that is already there | |
1978 | * @rec: The ftrace record descriptor | |
1979 | * | |
1980 | * The FTRACE_FL_REGS_EN is set when the record already points to | |
1981 | * a function that saves all the regs. Basically the '_EN' version | |
1982 | * represents the current state of the function. | |
1983 | * | |
1984 | * Returns the address of the trampoline that is currently being called | |
1985 | */ | |
1986 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) | |
1987 | { | |
1988 | struct ftrace_ops *ops; | |
1989 | ||
1990 | /* Trampolines take precedence over regs */ | |
1991 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | |
1992 | ops = ftrace_find_tramp_ops_curr(rec); | |
1993 | if (FTRACE_WARN_ON(!ops)) { | |
1994 | pr_warning("Bad trampoline accounting at: %p (%pS)\n", | |
1995 | (void *)rec->ip, (void *)rec->ip); | |
1996 | /* Ftrace is shutting down, return anything */ | |
1997 | return (unsigned long)FTRACE_ADDR; | |
1998 | } | |
1999 | return ops->trampoline; | |
2000 | } | |
2001 | ||
2002 | if (rec->flags & FTRACE_FL_REGS_EN) | |
2003 | return (unsigned long)FTRACE_REGS_ADDR; | |
2004 | else | |
2005 | return (unsigned long)FTRACE_ADDR; | |
2006 | } | |
2007 | ||
2008 | static int | |
2009 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |
2010 | { | |
2011 | unsigned long ftrace_old_addr; | |
2012 | unsigned long ftrace_addr; | |
2013 | int ret; | |
2014 | ||
2015 | ftrace_addr = ftrace_get_addr_new(rec); | |
2016 | ||
2017 | /* This needs to be done before we call ftrace_update_record */ | |
2018 | ftrace_old_addr = ftrace_get_addr_curr(rec); | |
2019 | ||
2020 | ret = ftrace_update_record(rec, enable); | |
2021 | ||
2022 | switch (ret) { | |
2023 | case FTRACE_UPDATE_IGNORE: | |
2024 | return 0; | |
2025 | ||
2026 | case FTRACE_UPDATE_MAKE_CALL: | |
2027 | return ftrace_make_call(rec, ftrace_addr); | |
2028 | ||
2029 | case FTRACE_UPDATE_MAKE_NOP: | |
2030 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); | |
2031 | ||
2032 | case FTRACE_UPDATE_MODIFY_CALL: | |
2033 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); | |
2034 | } | |
2035 | ||
2036 | return -1; /* unknow ftrace bug */ | |
2037 | } | |
2038 | ||
2039 | void __weak ftrace_replace_code(int enable) | |
2040 | { | |
2041 | struct dyn_ftrace *rec; | |
2042 | struct ftrace_page *pg; | |
2043 | int failed; | |
2044 | ||
2045 | if (unlikely(ftrace_disabled)) | |
2046 | return; | |
2047 | ||
2048 | do_for_each_ftrace_rec(pg, rec) { | |
2049 | failed = __ftrace_replace_code(rec, enable); | |
2050 | if (failed) { | |
2051 | ftrace_bug(failed, rec->ip); | |
2052 | /* Stop processing */ | |
2053 | return; | |
2054 | } | |
2055 | } while_for_each_ftrace_rec(); | |
2056 | } | |
2057 | ||
2058 | struct ftrace_rec_iter { | |
2059 | struct ftrace_page *pg; | |
2060 | int index; | |
2061 | }; | |
2062 | ||
2063 | /** | |
2064 | * ftrace_rec_iter_start, start up iterating over traced functions | |
2065 | * | |
2066 | * Returns an iterator handle that is used to iterate over all | |
2067 | * the records that represent address locations where functions | |
2068 | * are traced. | |
2069 | * | |
2070 | * May return NULL if no records are available. | |
2071 | */ | |
2072 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) | |
2073 | { | |
2074 | /* | |
2075 | * We only use a single iterator. | |
2076 | * Protected by the ftrace_lock mutex. | |
2077 | */ | |
2078 | static struct ftrace_rec_iter ftrace_rec_iter; | |
2079 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; | |
2080 | ||
2081 | iter->pg = ftrace_pages_start; | |
2082 | iter->index = 0; | |
2083 | ||
2084 | /* Could have empty pages */ | |
2085 | while (iter->pg && !iter->pg->index) | |
2086 | iter->pg = iter->pg->next; | |
2087 | ||
2088 | if (!iter->pg) | |
2089 | return NULL; | |
2090 | ||
2091 | return iter; | |
2092 | } | |
2093 | ||
2094 | /** | |
2095 | * ftrace_rec_iter_next, get the next record to process. | |
2096 | * @iter: The handle to the iterator. | |
2097 | * | |
2098 | * Returns the next iterator after the given iterator @iter. | |
2099 | */ | |
2100 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) | |
2101 | { | |
2102 | iter->index++; | |
2103 | ||
2104 | if (iter->index >= iter->pg->index) { | |
2105 | iter->pg = iter->pg->next; | |
2106 | iter->index = 0; | |
2107 | ||
2108 | /* Could have empty pages */ | |
2109 | while (iter->pg && !iter->pg->index) | |
2110 | iter->pg = iter->pg->next; | |
2111 | } | |
2112 | ||
2113 | if (!iter->pg) | |
2114 | return NULL; | |
2115 | ||
2116 | return iter; | |
2117 | } | |
2118 | ||
2119 | /** | |
2120 | * ftrace_rec_iter_record, get the record at the iterator location | |
2121 | * @iter: The current iterator location | |
2122 | * | |
2123 | * Returns the record that the current @iter is at. | |
2124 | */ | |
2125 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) | |
2126 | { | |
2127 | return &iter->pg->records[iter->index]; | |
2128 | } | |
2129 | ||
2130 | static int | |
2131 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |
2132 | { | |
2133 | unsigned long ip; | |
2134 | int ret; | |
2135 | ||
2136 | ip = rec->ip; | |
2137 | ||
2138 | if (unlikely(ftrace_disabled)) | |
2139 | return 0; | |
2140 | ||
2141 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | |
2142 | if (ret) { | |
2143 | ftrace_bug(ret, ip); | |
2144 | return 0; | |
2145 | } | |
2146 | return 1; | |
2147 | } | |
2148 | ||
2149 | /* | |
2150 | * archs can override this function if they must do something | |
2151 | * before the modifying code is performed. | |
2152 | */ | |
2153 | int __weak ftrace_arch_code_modify_prepare(void) | |
2154 | { | |
2155 | return 0; | |
2156 | } | |
2157 | ||
2158 | /* | |
2159 | * archs can override this function if they must do something | |
2160 | * after the modifying code is performed. | |
2161 | */ | |
2162 | int __weak ftrace_arch_code_modify_post_process(void) | |
2163 | { | |
2164 | return 0; | |
2165 | } | |
2166 | ||
2167 | void ftrace_modify_all_code(int command) | |
2168 | { | |
2169 | int update = command & FTRACE_UPDATE_TRACE_FUNC; | |
2170 | int err = 0; | |
2171 | ||
2172 | /* | |
2173 | * If the ftrace_caller calls a ftrace_ops func directly, | |
2174 | * we need to make sure that it only traces functions it | |
2175 | * expects to trace. When doing the switch of functions, | |
2176 | * we need to update to the ftrace_ops_list_func first | |
2177 | * before the transition between old and new calls are set, | |
2178 | * as the ftrace_ops_list_func will check the ops hashes | |
2179 | * to make sure the ops are having the right functions | |
2180 | * traced. | |
2181 | */ | |
2182 | if (update) { | |
2183 | err = ftrace_update_ftrace_func(ftrace_ops_list_func); | |
2184 | if (FTRACE_WARN_ON(err)) | |
2185 | return; | |
2186 | } | |
2187 | ||
2188 | if (command & FTRACE_UPDATE_CALLS) | |
2189 | ftrace_replace_code(1); | |
2190 | else if (command & FTRACE_DISABLE_CALLS) | |
2191 | ftrace_replace_code(0); | |
2192 | ||
2193 | if (update && ftrace_trace_function != ftrace_ops_list_func) { | |
2194 | function_trace_op = set_function_trace_op; | |
2195 | smp_wmb(); | |
2196 | /* If irqs are disabled, we are in stop machine */ | |
2197 | if (!irqs_disabled()) | |
2198 | smp_call_function(ftrace_sync_ipi, NULL, 1); | |
2199 | err = ftrace_update_ftrace_func(ftrace_trace_function); | |
2200 | if (FTRACE_WARN_ON(err)) | |
2201 | return; | |
2202 | } | |
2203 | ||
2204 | if (command & FTRACE_START_FUNC_RET) | |
2205 | err = ftrace_enable_ftrace_graph_caller(); | |
2206 | else if (command & FTRACE_STOP_FUNC_RET) | |
2207 | err = ftrace_disable_ftrace_graph_caller(); | |
2208 | FTRACE_WARN_ON(err); | |
2209 | } | |
2210 | ||
2211 | static int __ftrace_modify_code(void *data) | |
2212 | { | |
2213 | int *command = data; | |
2214 | ||
2215 | ftrace_modify_all_code(*command); | |
2216 | ||
2217 | return 0; | |
2218 | } | |
2219 | ||
2220 | /** | |
2221 | * ftrace_run_stop_machine, go back to the stop machine method | |
2222 | * @command: The command to tell ftrace what to do | |
2223 | * | |
2224 | * If an arch needs to fall back to the stop machine method, the | |
2225 | * it can call this function. | |
2226 | */ | |
2227 | void ftrace_run_stop_machine(int command) | |
2228 | { | |
2229 | stop_machine(__ftrace_modify_code, &command, NULL); | |
2230 | } | |
2231 | ||
2232 | /** | |
2233 | * arch_ftrace_update_code, modify the code to trace or not trace | |
2234 | * @command: The command that needs to be done | |
2235 | * | |
2236 | * Archs can override this function if it does not need to | |
2237 | * run stop_machine() to modify code. | |
2238 | */ | |
2239 | void __weak arch_ftrace_update_code(int command) | |
2240 | { | |
2241 | ftrace_run_stop_machine(command); | |
2242 | } | |
2243 | ||
2244 | static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops) | |
2245 | { | |
2246 | struct ftrace_page *pg; | |
2247 | struct dyn_ftrace *rec; | |
2248 | int size, bits; | |
2249 | int ret; | |
2250 | ||
2251 | size = ops->nr_trampolines; | |
2252 | bits = 0; | |
2253 | /* | |
2254 | * Make the hash size about 1/2 the # found | |
2255 | */ | |
2256 | for (size /= 2; size; size >>= 1) | |
2257 | bits++; | |
2258 | ||
2259 | ops->tramp_hash = alloc_ftrace_hash(bits); | |
2260 | /* | |
2261 | * TODO: a failed allocation is going to screw up | |
2262 | * the accounting of what needs to be modified | |
2263 | * and not. For now, we kill ftrace if we fail | |
2264 | * to allocate here. But there are ways around this, | |
2265 | * but that will take a little more work. | |
2266 | */ | |
2267 | if (!ops->tramp_hash) | |
2268 | return -ENOMEM; | |
2269 | ||
2270 | do_for_each_ftrace_rec(pg, rec) { | |
2271 | if (ftrace_rec_count(rec) == 1 && | |
2272 | ftrace_ops_test(ops, rec->ip, rec)) { | |
2273 | ||
2274 | /* | |
2275 | * If another ops adds to a rec, the rec will | |
2276 | * lose its trampoline and never get it back | |
2277 | * until all ops are off of it. | |
2278 | */ | |
2279 | if (!(rec->flags & FTRACE_FL_TRAMP)) | |
2280 | continue; | |
2281 | ||
2282 | /* This record had better have a trampoline */ | |
2283 | if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN))) | |
2284 | return -1; | |
2285 | ||
2286 | ret = add_hash_entry(ops->tramp_hash, rec->ip); | |
2287 | if (ret < 0) | |
2288 | return ret; | |
2289 | } | |
2290 | } while_for_each_ftrace_rec(); | |
2291 | ||
2292 | /* The number of recs in the hash must match nr_trampolines */ | |
2293 | if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines)) | |
2294 | pr_warn("count=%ld trampolines=%d\n", | |
2295 | ops->tramp_hash->count, | |
2296 | ops->nr_trampolines); | |
2297 | ||
2298 | return 0; | |
2299 | } | |
2300 | ||
2301 | static int ftrace_save_tramp_hashes(void) | |
2302 | { | |
2303 | struct ftrace_ops *op; | |
2304 | int ret; | |
2305 | ||
2306 | /* | |
2307 | * Now that any trampoline is being used, we need to save the | |
2308 | * hashes for the ops that have them. This allows the mapping | |
2309 | * back from the record to the ops that has the trampoline to | |
2310 | * know what code is being replaced. Modifying code must always | |
2311 | * verify what it is changing. | |
2312 | */ | |
2313 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2314 | ||
2315 | /* The tramp_hash is recreated each time. */ | |
2316 | free_ftrace_hash(op->tramp_hash); | |
2317 | op->tramp_hash = NULL; | |
2318 | ||
2319 | if (op->nr_trampolines) { | |
2320 | ret = ftrace_save_ops_tramp_hash(op); | |
2321 | if (ret) | |
2322 | return ret; | |
2323 | } | |
2324 | ||
2325 | } while_for_each_ftrace_op(op); | |
2326 | ||
2327 | return 0; | |
2328 | } | |
2329 | ||
2330 | static void ftrace_run_update_code(int command) | |
2331 | { | |
2332 | int ret; | |
2333 | ||
2334 | ret = ftrace_arch_code_modify_prepare(); | |
2335 | FTRACE_WARN_ON(ret); | |
2336 | if (ret) | |
2337 | return; | |
2338 | ||
2339 | /* | |
2340 | * By default we use stop_machine() to modify the code. | |
2341 | * But archs can do what ever they want as long as it | |
2342 | * is safe. The stop_machine() is the safest, but also | |
2343 | * produces the most overhead. | |
2344 | */ | |
2345 | arch_ftrace_update_code(command); | |
2346 | ||
2347 | ret = ftrace_arch_code_modify_post_process(); | |
2348 | FTRACE_WARN_ON(ret); | |
2349 | ||
2350 | ret = ftrace_save_tramp_hashes(); | |
2351 | FTRACE_WARN_ON(ret); | |
2352 | } | |
2353 | ||
2354 | static ftrace_func_t saved_ftrace_func; | |
2355 | static int ftrace_start_up; | |
2356 | ||
2357 | static void control_ops_free(struct ftrace_ops *ops) | |
2358 | { | |
2359 | free_percpu(ops->disabled); | |
2360 | } | |
2361 | ||
2362 | static void ftrace_startup_enable(int command) | |
2363 | { | |
2364 | if (saved_ftrace_func != ftrace_trace_function) { | |
2365 | saved_ftrace_func = ftrace_trace_function; | |
2366 | command |= FTRACE_UPDATE_TRACE_FUNC; | |
2367 | } | |
2368 | ||
2369 | if (!command || !ftrace_enabled) | |
2370 | return; | |
2371 | ||
2372 | ftrace_run_update_code(command); | |
2373 | } | |
2374 | ||
2375 | static int ftrace_startup(struct ftrace_ops *ops, int command) | |
2376 | { | |
2377 | int ret; | |
2378 | ||
2379 | if (unlikely(ftrace_disabled)) | |
2380 | return -ENODEV; | |
2381 | ||
2382 | ret = __register_ftrace_function(ops); | |
2383 | if (ret) | |
2384 | return ret; | |
2385 | ||
2386 | ftrace_start_up++; | |
2387 | command |= FTRACE_UPDATE_CALLS; | |
2388 | ||
2389 | ops->flags |= FTRACE_OPS_FL_ENABLED; | |
2390 | ||
2391 | ftrace_hash_rec_enable(ops, 1); | |
2392 | ||
2393 | ftrace_startup_enable(command); | |
2394 | ||
2395 | return 0; | |
2396 | } | |
2397 | ||
2398 | static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |
2399 | { | |
2400 | int ret; | |
2401 | ||
2402 | if (unlikely(ftrace_disabled)) | |
2403 | return -ENODEV; | |
2404 | ||
2405 | ret = __unregister_ftrace_function(ops); | |
2406 | if (ret) | |
2407 | return ret; | |
2408 | ||
2409 | ftrace_start_up--; | |
2410 | /* | |
2411 | * Just warn in case of unbalance, no need to kill ftrace, it's not | |
2412 | * critical but the ftrace_call callers may be never nopped again after | |
2413 | * further ftrace uses. | |
2414 | */ | |
2415 | WARN_ON_ONCE(ftrace_start_up < 0); | |
2416 | ||
2417 | ftrace_hash_rec_disable(ops, 1); | |
2418 | ||
2419 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | |
2420 | ||
2421 | command |= FTRACE_UPDATE_CALLS; | |
2422 | ||
2423 | if (saved_ftrace_func != ftrace_trace_function) { | |
2424 | saved_ftrace_func = ftrace_trace_function; | |
2425 | command |= FTRACE_UPDATE_TRACE_FUNC; | |
2426 | } | |
2427 | ||
2428 | if (!command || !ftrace_enabled) { | |
2429 | /* | |
2430 | * If these are control ops, they still need their | |
2431 | * per_cpu field freed. Since, function tracing is | |
2432 | * not currently active, we can just free them | |
2433 | * without synchronizing all CPUs. | |
2434 | */ | |
2435 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | |
2436 | control_ops_free(ops); | |
2437 | return 0; | |
2438 | } | |
2439 | ||
2440 | /* | |
2441 | * If the ops uses a trampoline, then it needs to be | |
2442 | * tested first on update. | |
2443 | */ | |
2444 | removed_ops = ops; | |
2445 | ||
2446 | ftrace_run_update_code(command); | |
2447 | ||
2448 | removed_ops = NULL; | |
2449 | ||
2450 | /* | |
2451 | * Dynamic ops may be freed, we must make sure that all | |
2452 | * callers are done before leaving this function. | |
2453 | * The same goes for freeing the per_cpu data of the control | |
2454 | * ops. | |
2455 | * | |
2456 | * Again, normal synchronize_sched() is not good enough. | |
2457 | * We need to do a hard force of sched synchronization. | |
2458 | * This is because we use preempt_disable() to do RCU, but | |
2459 | * the function tracers can be called where RCU is not watching | |
2460 | * (like before user_exit()). We can not rely on the RCU | |
2461 | * infrastructure to do the synchronization, thus we must do it | |
2462 | * ourselves. | |
2463 | */ | |
2464 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { | |
2465 | schedule_on_each_cpu(ftrace_sync); | |
2466 | ||
2467 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | |
2468 | control_ops_free(ops); | |
2469 | } | |
2470 | ||
2471 | return 0; | |
2472 | } | |
2473 | ||
2474 | static void ftrace_startup_sysctl(void) | |
2475 | { | |
2476 | if (unlikely(ftrace_disabled)) | |
2477 | return; | |
2478 | ||
2479 | /* Force update next time */ | |
2480 | saved_ftrace_func = NULL; | |
2481 | /* ftrace_start_up is true if we want ftrace running */ | |
2482 | if (ftrace_start_up) | |
2483 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | |
2484 | } | |
2485 | ||
2486 | static void ftrace_shutdown_sysctl(void) | |
2487 | { | |
2488 | if (unlikely(ftrace_disabled)) | |
2489 | return; | |
2490 | ||
2491 | /* ftrace_start_up is true if ftrace is running */ | |
2492 | if (ftrace_start_up) | |
2493 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); | |
2494 | } | |
2495 | ||
2496 | static cycle_t ftrace_update_time; | |
2497 | unsigned long ftrace_update_tot_cnt; | |
2498 | ||
2499 | static inline int ops_traces_mod(struct ftrace_ops *ops) | |
2500 | { | |
2501 | /* | |
2502 | * Filter_hash being empty will default to trace module. | |
2503 | * But notrace hash requires a test of individual module functions. | |
2504 | */ | |
2505 | return ftrace_hash_empty(ops->func_hash->filter_hash) && | |
2506 | ftrace_hash_empty(ops->func_hash->notrace_hash); | |
2507 | } | |
2508 | ||
2509 | /* | |
2510 | * Check if the current ops references the record. | |
2511 | * | |
2512 | * If the ops traces all functions, then it was already accounted for. | |
2513 | * If the ops does not trace the current record function, skip it. | |
2514 | * If the ops ignores the function via notrace filter, skip it. | |
2515 | */ | |
2516 | static inline bool | |
2517 | ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |
2518 | { | |
2519 | /* If ops isn't enabled, ignore it */ | |
2520 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
2521 | return 0; | |
2522 | ||
2523 | /* If ops traces all mods, we already accounted for it */ | |
2524 | if (ops_traces_mod(ops)) | |
2525 | return 0; | |
2526 | ||
2527 | /* The function must be in the filter */ | |
2528 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && | |
2529 | !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) | |
2530 | return 0; | |
2531 | ||
2532 | /* If in notrace hash, we ignore it too */ | |
2533 | if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) | |
2534 | return 0; | |
2535 | ||
2536 | return 1; | |
2537 | } | |
2538 | ||
2539 | static int referenced_filters(struct dyn_ftrace *rec) | |
2540 | { | |
2541 | struct ftrace_ops *ops; | |
2542 | int cnt = 0; | |
2543 | ||
2544 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | |
2545 | if (ops_references_rec(ops, rec)) | |
2546 | cnt++; | |
2547 | } | |
2548 | ||
2549 | return cnt; | |
2550 | } | |
2551 | ||
2552 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) | |
2553 | { | |
2554 | struct ftrace_page *pg; | |
2555 | struct dyn_ftrace *p; | |
2556 | cycle_t start, stop; | |
2557 | unsigned long update_cnt = 0; | |
2558 | unsigned long ref = 0; | |
2559 | bool test = false; | |
2560 | int i; | |
2561 | ||
2562 | /* | |
2563 | * When adding a module, we need to check if tracers are | |
2564 | * currently enabled and if they are set to trace all functions. | |
2565 | * If they are, we need to enable the module functions as well | |
2566 | * as update the reference counts for those function records. | |
2567 | */ | |
2568 | if (mod) { | |
2569 | struct ftrace_ops *ops; | |
2570 | ||
2571 | for (ops = ftrace_ops_list; | |
2572 | ops != &ftrace_list_end; ops = ops->next) { | |
2573 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | |
2574 | if (ops_traces_mod(ops)) | |
2575 | ref++; | |
2576 | else | |
2577 | test = true; | |
2578 | } | |
2579 | } | |
2580 | } | |
2581 | ||
2582 | start = ftrace_now(raw_smp_processor_id()); | |
2583 | ||
2584 | for (pg = new_pgs; pg; pg = pg->next) { | |
2585 | ||
2586 | for (i = 0; i < pg->index; i++) { | |
2587 | int cnt = ref; | |
2588 | ||
2589 | /* If something went wrong, bail without enabling anything */ | |
2590 | if (unlikely(ftrace_disabled)) | |
2591 | return -1; | |
2592 | ||
2593 | p = &pg->records[i]; | |
2594 | if (test) | |
2595 | cnt += referenced_filters(p); | |
2596 | p->flags = cnt; | |
2597 | ||
2598 | /* | |
2599 | * Do the initial record conversion from mcount jump | |
2600 | * to the NOP instructions. | |
2601 | */ | |
2602 | if (!ftrace_code_disable(mod, p)) | |
2603 | break; | |
2604 | ||
2605 | update_cnt++; | |
2606 | ||
2607 | /* | |
2608 | * If the tracing is enabled, go ahead and enable the record. | |
2609 | * | |
2610 | * The reason not to enable the record immediatelly is the | |
2611 | * inherent check of ftrace_make_nop/ftrace_make_call for | |
2612 | * correct previous instructions. Making first the NOP | |
2613 | * conversion puts the module to the correct state, thus | |
2614 | * passing the ftrace_make_call check. | |
2615 | */ | |
2616 | if (ftrace_start_up && cnt) { | |
2617 | int failed = __ftrace_replace_code(p, 1); | |
2618 | if (failed) | |
2619 | ftrace_bug(failed, p->ip); | |
2620 | } | |
2621 | } | |
2622 | } | |
2623 | ||
2624 | stop = ftrace_now(raw_smp_processor_id()); | |
2625 | ftrace_update_time = stop - start; | |
2626 | ftrace_update_tot_cnt += update_cnt; | |
2627 | ||
2628 | return 0; | |
2629 | } | |
2630 | ||
2631 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) | |
2632 | { | |
2633 | int order; | |
2634 | int cnt; | |
2635 | ||
2636 | if (WARN_ON(!count)) | |
2637 | return -EINVAL; | |
2638 | ||
2639 | order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); | |
2640 | ||
2641 | /* | |
2642 | * We want to fill as much as possible. No more than a page | |
2643 | * may be empty. | |
2644 | */ | |
2645 | while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) | |
2646 | order--; | |
2647 | ||
2648 | again: | |
2649 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | |
2650 | ||
2651 | if (!pg->records) { | |
2652 | /* if we can't allocate this size, try something smaller */ | |
2653 | if (!order) | |
2654 | return -ENOMEM; | |
2655 | order >>= 1; | |
2656 | goto again; | |
2657 | } | |
2658 | ||
2659 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; | |
2660 | pg->size = cnt; | |
2661 | ||
2662 | if (cnt > count) | |
2663 | cnt = count; | |
2664 | ||
2665 | return cnt; | |
2666 | } | |
2667 | ||
2668 | static struct ftrace_page * | |
2669 | ftrace_allocate_pages(unsigned long num_to_init) | |
2670 | { | |
2671 | struct ftrace_page *start_pg; | |
2672 | struct ftrace_page *pg; | |
2673 | int order; | |
2674 | int cnt; | |
2675 | ||
2676 | if (!num_to_init) | |
2677 | return 0; | |
2678 | ||
2679 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); | |
2680 | if (!pg) | |
2681 | return NULL; | |
2682 | ||
2683 | /* | |
2684 | * Try to allocate as much as possible in one continues | |
2685 | * location that fills in all of the space. We want to | |
2686 | * waste as little space as possible. | |
2687 | */ | |
2688 | for (;;) { | |
2689 | cnt = ftrace_allocate_records(pg, num_to_init); | |
2690 | if (cnt < 0) | |
2691 | goto free_pages; | |
2692 | ||
2693 | num_to_init -= cnt; | |
2694 | if (!num_to_init) | |
2695 | break; | |
2696 | ||
2697 | pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); | |
2698 | if (!pg->next) | |
2699 | goto free_pages; | |
2700 | ||
2701 | pg = pg->next; | |
2702 | } | |
2703 | ||
2704 | return start_pg; | |
2705 | ||
2706 | free_pages: | |
2707 | pg = start_pg; | |
2708 | while (pg) { | |
2709 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | |
2710 | free_pages((unsigned long)pg->records, order); | |
2711 | start_pg = pg->next; | |
2712 | kfree(pg); | |
2713 | pg = start_pg; | |
2714 | } | |
2715 | pr_info("ftrace: FAILED to allocate memory for functions\n"); | |
2716 | return NULL; | |
2717 | } | |
2718 | ||
2719 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | |
2720 | ||
2721 | struct ftrace_iterator { | |
2722 | loff_t pos; | |
2723 | loff_t func_pos; | |
2724 | struct ftrace_page *pg; | |
2725 | struct dyn_ftrace *func; | |
2726 | struct ftrace_func_probe *probe; | |
2727 | struct trace_parser parser; | |
2728 | struct ftrace_hash *hash; | |
2729 | struct ftrace_ops *ops; | |
2730 | int hidx; | |
2731 | int idx; | |
2732 | unsigned flags; | |
2733 | }; | |
2734 | ||
2735 | static void * | |
2736 | t_hash_next(struct seq_file *m, loff_t *pos) | |
2737 | { | |
2738 | struct ftrace_iterator *iter = m->private; | |
2739 | struct hlist_node *hnd = NULL; | |
2740 | struct hlist_head *hhd; | |
2741 | ||
2742 | (*pos)++; | |
2743 | iter->pos = *pos; | |
2744 | ||
2745 | if (iter->probe) | |
2746 | hnd = &iter->probe->node; | |
2747 | retry: | |
2748 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | |
2749 | return NULL; | |
2750 | ||
2751 | hhd = &ftrace_func_hash[iter->hidx]; | |
2752 | ||
2753 | if (hlist_empty(hhd)) { | |
2754 | iter->hidx++; | |
2755 | hnd = NULL; | |
2756 | goto retry; | |
2757 | } | |
2758 | ||
2759 | if (!hnd) | |
2760 | hnd = hhd->first; | |
2761 | else { | |
2762 | hnd = hnd->next; | |
2763 | if (!hnd) { | |
2764 | iter->hidx++; | |
2765 | goto retry; | |
2766 | } | |
2767 | } | |
2768 | ||
2769 | if (WARN_ON_ONCE(!hnd)) | |
2770 | return NULL; | |
2771 | ||
2772 | iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); | |
2773 | ||
2774 | return iter; | |
2775 | } | |
2776 | ||
2777 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | |
2778 | { | |
2779 | struct ftrace_iterator *iter = m->private; | |
2780 | void *p = NULL; | |
2781 | loff_t l; | |
2782 | ||
2783 | if (!(iter->flags & FTRACE_ITER_DO_HASH)) | |
2784 | return NULL; | |
2785 | ||
2786 | if (iter->func_pos > *pos) | |
2787 | return NULL; | |
2788 | ||
2789 | iter->hidx = 0; | |
2790 | for (l = 0; l <= (*pos - iter->func_pos); ) { | |
2791 | p = t_hash_next(m, &l); | |
2792 | if (!p) | |
2793 | break; | |
2794 | } | |
2795 | if (!p) | |
2796 | return NULL; | |
2797 | ||
2798 | /* Only set this if we have an item */ | |
2799 | iter->flags |= FTRACE_ITER_HASH; | |
2800 | ||
2801 | return iter; | |
2802 | } | |
2803 | ||
2804 | static int | |
2805 | t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) | |
2806 | { | |
2807 | struct ftrace_func_probe *rec; | |
2808 | ||
2809 | rec = iter->probe; | |
2810 | if (WARN_ON_ONCE(!rec)) | |
2811 | return -EIO; | |
2812 | ||
2813 | if (rec->ops->print) | |
2814 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | |
2815 | ||
2816 | seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); | |
2817 | ||
2818 | if (rec->data) | |
2819 | seq_printf(m, ":%p", rec->data); | |
2820 | seq_putc(m, '\n'); | |
2821 | ||
2822 | return 0; | |
2823 | } | |
2824 | ||
2825 | static void * | |
2826 | t_next(struct seq_file *m, void *v, loff_t *pos) | |
2827 | { | |
2828 | struct ftrace_iterator *iter = m->private; | |
2829 | struct ftrace_ops *ops = iter->ops; | |
2830 | struct dyn_ftrace *rec = NULL; | |
2831 | ||
2832 | if (unlikely(ftrace_disabled)) | |
2833 | return NULL; | |
2834 | ||
2835 | if (iter->flags & FTRACE_ITER_HASH) | |
2836 | return t_hash_next(m, pos); | |
2837 | ||
2838 | (*pos)++; | |
2839 | iter->pos = iter->func_pos = *pos; | |
2840 | ||
2841 | if (iter->flags & FTRACE_ITER_PRINTALL) | |
2842 | return t_hash_start(m, pos); | |
2843 | ||
2844 | retry: | |
2845 | if (iter->idx >= iter->pg->index) { | |
2846 | if (iter->pg->next) { | |
2847 | iter->pg = iter->pg->next; | |
2848 | iter->idx = 0; | |
2849 | goto retry; | |
2850 | } | |
2851 | } else { | |
2852 | rec = &iter->pg->records[iter->idx++]; | |
2853 | if (((iter->flags & FTRACE_ITER_FILTER) && | |
2854 | !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) || | |
2855 | ||
2856 | ((iter->flags & FTRACE_ITER_NOTRACE) && | |
2857 | !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) || | |
2858 | ||
2859 | ((iter->flags & FTRACE_ITER_ENABLED) && | |
2860 | !(rec->flags & FTRACE_FL_ENABLED))) { | |
2861 | ||
2862 | rec = NULL; | |
2863 | goto retry; | |
2864 | } | |
2865 | } | |
2866 | ||
2867 | if (!rec) | |
2868 | return t_hash_start(m, pos); | |
2869 | ||
2870 | iter->func = rec; | |
2871 | ||
2872 | return iter; | |
2873 | } | |
2874 | ||
2875 | static void reset_iter_read(struct ftrace_iterator *iter) | |
2876 | { | |
2877 | iter->pos = 0; | |
2878 | iter->func_pos = 0; | |
2879 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); | |
2880 | } | |
2881 | ||
2882 | static void *t_start(struct seq_file *m, loff_t *pos) | |
2883 | { | |
2884 | struct ftrace_iterator *iter = m->private; | |
2885 | struct ftrace_ops *ops = iter->ops; | |
2886 | void *p = NULL; | |
2887 | loff_t l; | |
2888 | ||
2889 | mutex_lock(&ftrace_lock); | |
2890 | ||
2891 | if (unlikely(ftrace_disabled)) | |
2892 | return NULL; | |
2893 | ||
2894 | /* | |
2895 | * If an lseek was done, then reset and start from beginning. | |
2896 | */ | |
2897 | if (*pos < iter->pos) | |
2898 | reset_iter_read(iter); | |
2899 | ||
2900 | /* | |
2901 | * For set_ftrace_filter reading, if we have the filter | |
2902 | * off, we can short cut and just print out that all | |
2903 | * functions are enabled. | |
2904 | */ | |
2905 | if ((iter->flags & FTRACE_ITER_FILTER && | |
2906 | ftrace_hash_empty(ops->func_hash->filter_hash)) || | |
2907 | (iter->flags & FTRACE_ITER_NOTRACE && | |
2908 | ftrace_hash_empty(ops->func_hash->notrace_hash))) { | |
2909 | if (*pos > 0) | |
2910 | return t_hash_start(m, pos); | |
2911 | iter->flags |= FTRACE_ITER_PRINTALL; | |
2912 | /* reset in case of seek/pread */ | |
2913 | iter->flags &= ~FTRACE_ITER_HASH; | |
2914 | return iter; | |
2915 | } | |
2916 | ||
2917 | if (iter->flags & FTRACE_ITER_HASH) | |
2918 | return t_hash_start(m, pos); | |
2919 | ||
2920 | /* | |
2921 | * Unfortunately, we need to restart at ftrace_pages_start | |
2922 | * every time we let go of the ftrace_mutex. This is because | |
2923 | * those pointers can change without the lock. | |
2924 | */ | |
2925 | iter->pg = ftrace_pages_start; | |
2926 | iter->idx = 0; | |
2927 | for (l = 0; l <= *pos; ) { | |
2928 | p = t_next(m, p, &l); | |
2929 | if (!p) | |
2930 | break; | |
2931 | } | |
2932 | ||
2933 | if (!p) | |
2934 | return t_hash_start(m, pos); | |
2935 | ||
2936 | return iter; | |
2937 | } | |
2938 | ||
2939 | static void t_stop(struct seq_file *m, void *p) | |
2940 | { | |
2941 | mutex_unlock(&ftrace_lock); | |
2942 | } | |
2943 | ||
2944 | static int t_show(struct seq_file *m, void *v) | |
2945 | { | |
2946 | struct ftrace_iterator *iter = m->private; | |
2947 | struct dyn_ftrace *rec; | |
2948 | ||
2949 | if (iter->flags & FTRACE_ITER_HASH) | |
2950 | return t_hash_show(m, iter); | |
2951 | ||
2952 | if (iter->flags & FTRACE_ITER_PRINTALL) { | |
2953 | if (iter->flags & FTRACE_ITER_NOTRACE) | |
2954 | seq_printf(m, "#### no functions disabled ####\n"); | |
2955 | else | |
2956 | seq_printf(m, "#### all functions enabled ####\n"); | |
2957 | return 0; | |
2958 | } | |
2959 | ||
2960 | rec = iter->func; | |
2961 | ||
2962 | if (!rec) | |
2963 | return 0; | |
2964 | ||
2965 | seq_printf(m, "%ps", (void *)rec->ip); | |
2966 | if (iter->flags & FTRACE_ITER_ENABLED) { | |
2967 | seq_printf(m, " (%ld)%s", | |
2968 | ftrace_rec_count(rec), | |
2969 | rec->flags & FTRACE_FL_REGS ? " R" : " "); | |
2970 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | |
2971 | struct ftrace_ops *ops; | |
2972 | ||
2973 | ops = ftrace_find_tramp_ops_curr(rec); | |
2974 | if (ops && ops->trampoline) | |
2975 | seq_printf(m, "\ttramp: %pS", | |
2976 | (void *)ops->trampoline); | |
2977 | else | |
2978 | seq_printf(m, "\ttramp: ERROR!"); | |
2979 | } | |
2980 | } | |
2981 | ||
2982 | seq_printf(m, "\n"); | |
2983 | ||
2984 | return 0; | |
2985 | } | |
2986 | ||
2987 | static const struct seq_operations show_ftrace_seq_ops = { | |
2988 | .start = t_start, | |
2989 | .next = t_next, | |
2990 | .stop = t_stop, | |
2991 | .show = t_show, | |
2992 | }; | |
2993 | ||
2994 | static int | |
2995 | ftrace_avail_open(struct inode *inode, struct file *file) | |
2996 | { | |
2997 | struct ftrace_iterator *iter; | |
2998 | ||
2999 | if (unlikely(ftrace_disabled)) | |
3000 | return -ENODEV; | |
3001 | ||
3002 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | |
3003 | if (iter) { | |
3004 | iter->pg = ftrace_pages_start; | |
3005 | iter->ops = &global_ops; | |
3006 | } | |
3007 | ||
3008 | return iter ? 0 : -ENOMEM; | |
3009 | } | |
3010 | ||
3011 | static int | |
3012 | ftrace_enabled_open(struct inode *inode, struct file *file) | |
3013 | { | |
3014 | struct ftrace_iterator *iter; | |
3015 | ||
3016 | if (unlikely(ftrace_disabled)) | |
3017 | return -ENODEV; | |
3018 | ||
3019 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | |
3020 | if (iter) { | |
3021 | iter->pg = ftrace_pages_start; | |
3022 | iter->flags = FTRACE_ITER_ENABLED; | |
3023 | iter->ops = &global_ops; | |
3024 | } | |
3025 | ||
3026 | return iter ? 0 : -ENOMEM; | |
3027 | } | |
3028 | ||
3029 | /** | |
3030 | * ftrace_regex_open - initialize function tracer filter files | |
3031 | * @ops: The ftrace_ops that hold the hash filters | |
3032 | * @flag: The type of filter to process | |
3033 | * @inode: The inode, usually passed in to your open routine | |
3034 | * @file: The file, usually passed in to your open routine | |
3035 | * | |
3036 | * ftrace_regex_open() initializes the filter files for the | |
3037 | * @ops. Depending on @flag it may process the filter hash or | |
3038 | * the notrace hash of @ops. With this called from the open | |
3039 | * routine, you can use ftrace_filter_write() for the write | |
3040 | * routine if @flag has FTRACE_ITER_FILTER set, or | |
3041 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. | |
3042 | * tracing_lseek() should be used as the lseek routine, and | |
3043 | * release must call ftrace_regex_release(). | |
3044 | */ | |
3045 | int | |
3046 | ftrace_regex_open(struct ftrace_ops *ops, int flag, | |
3047 | struct inode *inode, struct file *file) | |
3048 | { | |
3049 | struct ftrace_iterator *iter; | |
3050 | struct ftrace_hash *hash; | |
3051 | int ret = 0; | |
3052 | ||
3053 | ftrace_ops_init(ops); | |
3054 | ||
3055 | if (unlikely(ftrace_disabled)) | |
3056 | return -ENODEV; | |
3057 | ||
3058 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | |
3059 | if (!iter) | |
3060 | return -ENOMEM; | |
3061 | ||
3062 | if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { | |
3063 | kfree(iter); | |
3064 | return -ENOMEM; | |
3065 | } | |
3066 | ||
3067 | iter->ops = ops; | |
3068 | iter->flags = flag; | |
3069 | ||
3070 | mutex_lock(&ops->func_hash->regex_lock); | |
3071 | ||
3072 | if (flag & FTRACE_ITER_NOTRACE) | |
3073 | hash = ops->func_hash->notrace_hash; | |
3074 | else | |
3075 | hash = ops->func_hash->filter_hash; | |
3076 | ||
3077 | if (file->f_mode & FMODE_WRITE) { | |
3078 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; | |
3079 | ||
3080 | if (file->f_flags & O_TRUNC) | |
3081 | iter->hash = alloc_ftrace_hash(size_bits); | |
3082 | else | |
3083 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); | |
3084 | ||
3085 | if (!iter->hash) { | |
3086 | trace_parser_put(&iter->parser); | |
3087 | kfree(iter); | |
3088 | ret = -ENOMEM; | |
3089 | goto out_unlock; | |
3090 | } | |
3091 | } | |
3092 | ||
3093 | if (file->f_mode & FMODE_READ) { | |
3094 | iter->pg = ftrace_pages_start; | |
3095 | ||
3096 | ret = seq_open(file, &show_ftrace_seq_ops); | |
3097 | if (!ret) { | |
3098 | struct seq_file *m = file->private_data; | |
3099 | m->private = iter; | |
3100 | } else { | |
3101 | /* Failed */ | |
3102 | free_ftrace_hash(iter->hash); | |
3103 | trace_parser_put(&iter->parser); | |
3104 | kfree(iter); | |
3105 | } | |
3106 | } else | |
3107 | file->private_data = iter; | |
3108 | ||
3109 | out_unlock: | |
3110 | mutex_unlock(&ops->func_hash->regex_lock); | |
3111 | ||
3112 | return ret; | |
3113 | } | |
3114 | ||
3115 | static int | |
3116 | ftrace_filter_open(struct inode *inode, struct file *file) | |
3117 | { | |
3118 | struct ftrace_ops *ops = inode->i_private; | |
3119 | ||
3120 | return ftrace_regex_open(ops, | |
3121 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, | |
3122 | inode, file); | |
3123 | } | |
3124 | ||
3125 | static int | |
3126 | ftrace_notrace_open(struct inode *inode, struct file *file) | |
3127 | { | |
3128 | struct ftrace_ops *ops = inode->i_private; | |
3129 | ||
3130 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, | |
3131 | inode, file); | |
3132 | } | |
3133 | ||
3134 | static int ftrace_match(char *str, char *regex, int len, int type) | |
3135 | { | |
3136 | int matched = 0; | |
3137 | int slen; | |
3138 | ||
3139 | switch (type) { | |
3140 | case MATCH_FULL: | |
3141 | if (strcmp(str, regex) == 0) | |
3142 | matched = 1; | |
3143 | break; | |
3144 | case MATCH_FRONT_ONLY: | |
3145 | if (strncmp(str, regex, len) == 0) | |
3146 | matched = 1; | |
3147 | break; | |
3148 | case MATCH_MIDDLE_ONLY: | |
3149 | if (strstr(str, regex)) | |
3150 | matched = 1; | |
3151 | break; | |
3152 | case MATCH_END_ONLY: | |
3153 | slen = strlen(str); | |
3154 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) | |
3155 | matched = 1; | |
3156 | break; | |
3157 | } | |
3158 | ||
3159 | return matched; | |
3160 | } | |
3161 | ||
3162 | static int | |
3163 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) | |
3164 | { | |
3165 | struct ftrace_func_entry *entry; | |
3166 | int ret = 0; | |
3167 | ||
3168 | entry = ftrace_lookup_ip(hash, rec->ip); | |
3169 | if (not) { | |
3170 | /* Do nothing if it doesn't exist */ | |
3171 | if (!entry) | |
3172 | return 0; | |
3173 | ||
3174 | free_hash_entry(hash, entry); | |
3175 | } else { | |
3176 | /* Do nothing if it exists */ | |
3177 | if (entry) | |
3178 | return 0; | |
3179 | ||
3180 | ret = add_hash_entry(hash, rec->ip); | |
3181 | } | |
3182 | return ret; | |
3183 | } | |
3184 | ||
3185 | static int | |
3186 | ftrace_match_record(struct dyn_ftrace *rec, char *mod, | |
3187 | char *regex, int len, int type) | |
3188 | { | |
3189 | char str[KSYM_SYMBOL_LEN]; | |
3190 | char *modname; | |
3191 | ||
3192 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | |
3193 | ||
3194 | if (mod) { | |
3195 | /* module lookup requires matching the module */ | |
3196 | if (!modname || strcmp(modname, mod)) | |
3197 | return 0; | |
3198 | ||
3199 | /* blank search means to match all funcs in the mod */ | |
3200 | if (!len) | |
3201 | return 1; | |
3202 | } | |
3203 | ||
3204 | return ftrace_match(str, regex, len, type); | |
3205 | } | |
3206 | ||
3207 | static int | |
3208 | match_records(struct ftrace_hash *hash, char *buff, | |
3209 | int len, char *mod, int not) | |
3210 | { | |
3211 | unsigned search_len = 0; | |
3212 | struct ftrace_page *pg; | |
3213 | struct dyn_ftrace *rec; | |
3214 | int type = MATCH_FULL; | |
3215 | char *search = buff; | |
3216 | int found = 0; | |
3217 | int ret; | |
3218 | ||
3219 | if (len) { | |
3220 | type = filter_parse_regex(buff, len, &search, ¬); | |
3221 | search_len = strlen(search); | |
3222 | } | |
3223 | ||
3224 | mutex_lock(&ftrace_lock); | |
3225 | ||
3226 | if (unlikely(ftrace_disabled)) | |
3227 | goto out_unlock; | |
3228 | ||
3229 | do_for_each_ftrace_rec(pg, rec) { | |
3230 | if (ftrace_match_record(rec, mod, search, search_len, type)) { | |
3231 | ret = enter_record(hash, rec, not); | |
3232 | if (ret < 0) { | |
3233 | found = ret; | |
3234 | goto out_unlock; | |
3235 | } | |
3236 | found = 1; | |
3237 | } | |
3238 | } while_for_each_ftrace_rec(); | |
3239 | out_unlock: | |
3240 | mutex_unlock(&ftrace_lock); | |
3241 | ||
3242 | return found; | |
3243 | } | |
3244 | ||
3245 | static int | |
3246 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) | |
3247 | { | |
3248 | return match_records(hash, buff, len, NULL, 0); | |
3249 | } | |
3250 | ||
3251 | static int | |
3252 | ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) | |
3253 | { | |
3254 | int not = 0; | |
3255 | ||
3256 | /* blank or '*' mean the same */ | |
3257 | if (strcmp(buff, "*") == 0) | |
3258 | buff[0] = 0; | |
3259 | ||
3260 | /* handle the case of 'dont filter this module' */ | |
3261 | if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { | |
3262 | buff[0] = 0; | |
3263 | not = 1; | |
3264 | } | |
3265 | ||
3266 | return match_records(hash, buff, strlen(buff), mod, not); | |
3267 | } | |
3268 | ||
3269 | /* | |
3270 | * We register the module command as a template to show others how | |
3271 | * to register the a command as well. | |
3272 | */ | |
3273 | ||
3274 | static int | |
3275 | ftrace_mod_callback(struct ftrace_hash *hash, | |
3276 | char *func, char *cmd, char *param, int enable) | |
3277 | { | |
3278 | char *mod; | |
3279 | int ret = -EINVAL; | |
3280 | ||
3281 | /* | |
3282 | * cmd == 'mod' because we only registered this func | |
3283 | * for the 'mod' ftrace_func_command. | |
3284 | * But if you register one func with multiple commands, | |
3285 | * you can tell which command was used by the cmd | |
3286 | * parameter. | |
3287 | */ | |
3288 | ||
3289 | /* we must have a module name */ | |
3290 | if (!param) | |
3291 | return ret; | |
3292 | ||
3293 | mod = strsep(¶m, ":"); | |
3294 | if (!strlen(mod)) | |
3295 | return ret; | |
3296 | ||
3297 | ret = ftrace_match_module_records(hash, func, mod); | |
3298 | if (!ret) | |
3299 | ret = -EINVAL; | |
3300 | if (ret < 0) | |
3301 | return ret; | |
3302 | ||
3303 | return 0; | |
3304 | } | |
3305 | ||
3306 | static struct ftrace_func_command ftrace_mod_cmd = { | |
3307 | .name = "mod", | |
3308 | .func = ftrace_mod_callback, | |
3309 | }; | |
3310 | ||
3311 | static int __init ftrace_mod_cmd_init(void) | |
3312 | { | |
3313 | return register_ftrace_command(&ftrace_mod_cmd); | |
3314 | } | |
3315 | core_initcall(ftrace_mod_cmd_init); | |
3316 | ||
3317 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, | |
3318 | struct ftrace_ops *op, struct pt_regs *pt_regs) | |
3319 | { | |
3320 | struct ftrace_func_probe *entry; | |
3321 | struct hlist_head *hhd; | |
3322 | unsigned long key; | |
3323 | ||
3324 | key = hash_long(ip, FTRACE_HASH_BITS); | |
3325 | ||
3326 | hhd = &ftrace_func_hash[key]; | |
3327 | ||
3328 | if (hlist_empty(hhd)) | |
3329 | return; | |
3330 | ||
3331 | /* | |
3332 | * Disable preemption for these calls to prevent a RCU grace | |
3333 | * period. This syncs the hash iteration and freeing of items | |
3334 | * on the hash. rcu_read_lock is too dangerous here. | |
3335 | */ | |
3336 | preempt_disable_notrace(); | |
3337 | hlist_for_each_entry_rcu_notrace(entry, hhd, node) { | |
3338 | if (entry->ip == ip) | |
3339 | entry->ops->func(ip, parent_ip, &entry->data); | |
3340 | } | |
3341 | preempt_enable_notrace(); | |
3342 | } | |
3343 | ||
3344 | static struct ftrace_ops trace_probe_ops __read_mostly = | |
3345 | { | |
3346 | .func = function_trace_probe_call, | |
3347 | .flags = FTRACE_OPS_FL_INITIALIZED, | |
3348 | INIT_OPS_HASH(trace_probe_ops) | |
3349 | }; | |
3350 | ||
3351 | static int ftrace_probe_registered; | |
3352 | ||
3353 | static void __enable_ftrace_function_probe(void) | |
3354 | { | |
3355 | int ret; | |
3356 | int i; | |
3357 | ||
3358 | if (ftrace_probe_registered) { | |
3359 | /* still need to update the function call sites */ | |
3360 | if (ftrace_enabled) | |
3361 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | |
3362 | return; | |
3363 | } | |
3364 | ||
3365 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | |
3366 | struct hlist_head *hhd = &ftrace_func_hash[i]; | |
3367 | if (hhd->first) | |
3368 | break; | |
3369 | } | |
3370 | /* Nothing registered? */ | |
3371 | if (i == FTRACE_FUNC_HASHSIZE) | |
3372 | return; | |
3373 | ||
3374 | ret = ftrace_startup(&trace_probe_ops, 0); | |
3375 | ||
3376 | ftrace_probe_registered = 1; | |
3377 | } | |
3378 | ||
3379 | static void __disable_ftrace_function_probe(void) | |
3380 | { | |
3381 | int i; | |
3382 | ||
3383 | if (!ftrace_probe_registered) | |
3384 | return; | |
3385 | ||
3386 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | |
3387 | struct hlist_head *hhd = &ftrace_func_hash[i]; | |
3388 | if (hhd->first) | |
3389 | return; | |
3390 | } | |
3391 | ||
3392 | /* no more funcs left */ | |
3393 | ftrace_shutdown(&trace_probe_ops, 0); | |
3394 | ||
3395 | ftrace_probe_registered = 0; | |
3396 | } | |
3397 | ||
3398 | ||
3399 | static void ftrace_free_entry(struct ftrace_func_probe *entry) | |
3400 | { | |
3401 | if (entry->ops->free) | |
3402 | entry->ops->free(entry->ops, entry->ip, &entry->data); | |
3403 | kfree(entry); | |
3404 | } | |
3405 | ||
3406 | int | |
3407 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |
3408 | void *data) | |
3409 | { | |
3410 | struct ftrace_func_probe *entry; | |
3411 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | |
3412 | struct ftrace_hash *hash; | |
3413 | struct ftrace_page *pg; | |
3414 | struct dyn_ftrace *rec; | |
3415 | int type, len, not; | |
3416 | unsigned long key; | |
3417 | int count = 0; | |
3418 | char *search; | |
3419 | int ret; | |
3420 | ||
3421 | type = filter_parse_regex(glob, strlen(glob), &search, ¬); | |
3422 | len = strlen(search); | |
3423 | ||
3424 | /* we do not support '!' for function probes */ | |
3425 | if (WARN_ON(not)) | |
3426 | return -EINVAL; | |
3427 | ||
3428 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | |
3429 | ||
3430 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | |
3431 | if (!hash) { | |
3432 | count = -ENOMEM; | |
3433 | goto out; | |
3434 | } | |
3435 | ||
3436 | if (unlikely(ftrace_disabled)) { | |
3437 | count = -ENODEV; | |
3438 | goto out; | |
3439 | } | |
3440 | ||
3441 | mutex_lock(&ftrace_lock); | |
3442 | ||
3443 | do_for_each_ftrace_rec(pg, rec) { | |
3444 | ||
3445 | if (!ftrace_match_record(rec, NULL, search, len, type)) | |
3446 | continue; | |
3447 | ||
3448 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | |
3449 | if (!entry) { | |
3450 | /* If we did not process any, then return error */ | |
3451 | if (!count) | |
3452 | count = -ENOMEM; | |
3453 | goto out_unlock; | |
3454 | } | |
3455 | ||
3456 | count++; | |
3457 | ||
3458 | entry->data = data; | |
3459 | ||
3460 | /* | |
3461 | * The caller might want to do something special | |
3462 | * for each function we find. We call the callback | |
3463 | * to give the caller an opportunity to do so. | |
3464 | */ | |
3465 | if (ops->init) { | |
3466 | if (ops->init(ops, rec->ip, &entry->data) < 0) { | |
3467 | /* caller does not like this func */ | |
3468 | kfree(entry); | |
3469 | continue; | |
3470 | } | |
3471 | } | |
3472 | ||
3473 | ret = enter_record(hash, rec, 0); | |
3474 | if (ret < 0) { | |
3475 | kfree(entry); | |
3476 | count = ret; | |
3477 | goto out_unlock; | |
3478 | } | |
3479 | ||
3480 | entry->ops = ops; | |
3481 | entry->ip = rec->ip; | |
3482 | ||
3483 | key = hash_long(entry->ip, FTRACE_HASH_BITS); | |
3484 | hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); | |
3485 | ||
3486 | } while_for_each_ftrace_rec(); | |
3487 | ||
3488 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | |
3489 | if (ret < 0) | |
3490 | count = ret; | |
3491 | ||
3492 | __enable_ftrace_function_probe(); | |
3493 | ||
3494 | out_unlock: | |
3495 | mutex_unlock(&ftrace_lock); | |
3496 | out: | |
3497 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); | |
3498 | free_ftrace_hash(hash); | |
3499 | ||
3500 | return count; | |
3501 | } | |
3502 | ||
3503 | enum { | |
3504 | PROBE_TEST_FUNC = 1, | |
3505 | PROBE_TEST_DATA = 2 | |
3506 | }; | |
3507 | ||
3508 | static void | |
3509 | __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |
3510 | void *data, int flags) | |
3511 | { | |
3512 | struct ftrace_func_entry *rec_entry; | |
3513 | struct ftrace_func_probe *entry; | |
3514 | struct ftrace_func_probe *p; | |
3515 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | |
3516 | struct list_head free_list; | |
3517 | struct ftrace_hash *hash; | |
3518 | struct hlist_node *tmp; | |
3519 | char str[KSYM_SYMBOL_LEN]; | |
3520 | int type = MATCH_FULL; | |
3521 | int i, len = 0; | |
3522 | char *search; | |
3523 | ||
3524 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) | |
3525 | glob = NULL; | |
3526 | else if (glob) { | |
3527 | int not; | |
3528 | ||
3529 | type = filter_parse_regex(glob, strlen(glob), &search, ¬); | |
3530 | len = strlen(search); | |
3531 | ||
3532 | /* we do not support '!' for function probes */ | |
3533 | if (WARN_ON(not)) | |
3534 | return; | |
3535 | } | |
3536 | ||
3537 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | |
3538 | ||
3539 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | |
3540 | if (!hash) | |
3541 | /* Hmm, should report this somehow */ | |
3542 | goto out_unlock; | |
3543 | ||
3544 | INIT_LIST_HEAD(&free_list); | |
3545 | ||
3546 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | |
3547 | struct hlist_head *hhd = &ftrace_func_hash[i]; | |
3548 | ||
3549 | hlist_for_each_entry_safe(entry, tmp, hhd, node) { | |
3550 | ||
3551 | /* break up if statements for readability */ | |
3552 | if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) | |
3553 | continue; | |
3554 | ||
3555 | if ((flags & PROBE_TEST_DATA) && entry->data != data) | |
3556 | continue; | |
3557 | ||
3558 | /* do this last, since it is the most expensive */ | |
3559 | if (glob) { | |
3560 | kallsyms_lookup(entry->ip, NULL, NULL, | |
3561 | NULL, str); | |
3562 | if (!ftrace_match(str, glob, len, type)) | |
3563 | continue; | |
3564 | } | |
3565 | ||
3566 | rec_entry = ftrace_lookup_ip(hash, entry->ip); | |
3567 | /* It is possible more than one entry had this ip */ | |
3568 | if (rec_entry) | |
3569 | free_hash_entry(hash, rec_entry); | |
3570 | ||
3571 | hlist_del_rcu(&entry->node); | |
3572 | list_add(&entry->free_list, &free_list); | |
3573 | } | |
3574 | } | |
3575 | mutex_lock(&ftrace_lock); | |
3576 | __disable_ftrace_function_probe(); | |
3577 | /* | |
3578 | * Remove after the disable is called. Otherwise, if the last | |
3579 | * probe is removed, a null hash means *all enabled*. | |
3580 | */ | |
3581 | ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | |
3582 | synchronize_sched(); | |
3583 | list_for_each_entry_safe(entry, p, &free_list, free_list) { | |
3584 | list_del(&entry->free_list); | |
3585 | ftrace_free_entry(entry); | |
3586 | } | |
3587 | mutex_unlock(&ftrace_lock); | |
3588 | ||
3589 | out_unlock: | |
3590 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); | |
3591 | free_ftrace_hash(hash); | |
3592 | } | |
3593 | ||
3594 | void | |
3595 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |
3596 | void *data) | |
3597 | { | |
3598 | __unregister_ftrace_function_probe(glob, ops, data, | |
3599 | PROBE_TEST_FUNC | PROBE_TEST_DATA); | |
3600 | } | |
3601 | ||
3602 | void | |
3603 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) | |
3604 | { | |
3605 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); | |
3606 | } | |
3607 | ||
3608 | void unregister_ftrace_function_probe_all(char *glob) | |
3609 | { | |
3610 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); | |
3611 | } | |
3612 | ||
3613 | static LIST_HEAD(ftrace_commands); | |
3614 | static DEFINE_MUTEX(ftrace_cmd_mutex); | |
3615 | ||
3616 | /* | |
3617 | * Currently we only register ftrace commands from __init, so mark this | |
3618 | * __init too. | |
3619 | */ | |
3620 | __init int register_ftrace_command(struct ftrace_func_command *cmd) | |
3621 | { | |
3622 | struct ftrace_func_command *p; | |
3623 | int ret = 0; | |
3624 | ||
3625 | mutex_lock(&ftrace_cmd_mutex); | |
3626 | list_for_each_entry(p, &ftrace_commands, list) { | |
3627 | if (strcmp(cmd->name, p->name) == 0) { | |
3628 | ret = -EBUSY; | |
3629 | goto out_unlock; | |
3630 | } | |
3631 | } | |
3632 | list_add(&cmd->list, &ftrace_commands); | |
3633 | out_unlock: | |
3634 | mutex_unlock(&ftrace_cmd_mutex); | |
3635 | ||
3636 | return ret; | |
3637 | } | |
3638 | ||
3639 | /* | |
3640 | * Currently we only unregister ftrace commands from __init, so mark | |
3641 | * this __init too. | |
3642 | */ | |
3643 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) | |
3644 | { | |
3645 | struct ftrace_func_command *p, *n; | |
3646 | int ret = -ENODEV; | |
3647 | ||
3648 | mutex_lock(&ftrace_cmd_mutex); | |
3649 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { | |
3650 | if (strcmp(cmd->name, p->name) == 0) { | |
3651 | ret = 0; | |
3652 | list_del_init(&p->list); | |
3653 | goto out_unlock; | |
3654 | } | |
3655 | } | |
3656 | out_unlock: | |
3657 | mutex_unlock(&ftrace_cmd_mutex); | |
3658 | ||
3659 | return ret; | |
3660 | } | |
3661 | ||
3662 | static int ftrace_process_regex(struct ftrace_hash *hash, | |
3663 | char *buff, int len, int enable) | |
3664 | { | |
3665 | char *func, *command, *next = buff; | |
3666 | struct ftrace_func_command *p; | |
3667 | int ret = -EINVAL; | |
3668 | ||
3669 | func = strsep(&next, ":"); | |
3670 | ||
3671 | if (!next) { | |
3672 | ret = ftrace_match_records(hash, func, len); | |
3673 | if (!ret) | |
3674 | ret = -EINVAL; | |
3675 | if (ret < 0) | |
3676 | return ret; | |
3677 | return 0; | |
3678 | } | |
3679 | ||
3680 | /* command found */ | |
3681 | ||
3682 | command = strsep(&next, ":"); | |
3683 | ||
3684 | mutex_lock(&ftrace_cmd_mutex); | |
3685 | list_for_each_entry(p, &ftrace_commands, list) { | |
3686 | if (strcmp(p->name, command) == 0) { | |
3687 | ret = p->func(hash, func, command, next, enable); | |
3688 | goto out_unlock; | |
3689 | } | |
3690 | } | |
3691 | out_unlock: | |
3692 | mutex_unlock(&ftrace_cmd_mutex); | |
3693 | ||
3694 | return ret; | |
3695 | } | |
3696 | ||
3697 | static ssize_t | |
3698 | ftrace_regex_write(struct file *file, const char __user *ubuf, | |
3699 | size_t cnt, loff_t *ppos, int enable) | |
3700 | { | |
3701 | struct ftrace_iterator *iter; | |
3702 | struct trace_parser *parser; | |
3703 | ssize_t ret, read; | |
3704 | ||
3705 | if (!cnt) | |
3706 | return 0; | |
3707 | ||
3708 | if (file->f_mode & FMODE_READ) { | |
3709 | struct seq_file *m = file->private_data; | |
3710 | iter = m->private; | |
3711 | } else | |
3712 | iter = file->private_data; | |
3713 | ||
3714 | if (unlikely(ftrace_disabled)) | |
3715 | return -ENODEV; | |
3716 | ||
3717 | /* iter->hash is a local copy, so we don't need regex_lock */ | |
3718 | ||
3719 | parser = &iter->parser; | |
3720 | read = trace_get_user(parser, ubuf, cnt, ppos); | |
3721 | ||
3722 | if (read >= 0 && trace_parser_loaded(parser) && | |
3723 | !trace_parser_cont(parser)) { | |
3724 | ret = ftrace_process_regex(iter->hash, parser->buffer, | |
3725 | parser->idx, enable); | |
3726 | trace_parser_clear(parser); | |
3727 | if (ret < 0) | |
3728 | goto out; | |
3729 | } | |
3730 | ||
3731 | ret = read; | |
3732 | out: | |
3733 | return ret; | |
3734 | } | |
3735 | ||
3736 | ssize_t | |
3737 | ftrace_filter_write(struct file *file, const char __user *ubuf, | |
3738 | size_t cnt, loff_t *ppos) | |
3739 | { | |
3740 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); | |
3741 | } | |
3742 | ||
3743 | ssize_t | |
3744 | ftrace_notrace_write(struct file *file, const char __user *ubuf, | |
3745 | size_t cnt, loff_t *ppos) | |
3746 | { | |
3747 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | |
3748 | } | |
3749 | ||
3750 | static int | |
3751 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |
3752 | { | |
3753 | struct ftrace_func_entry *entry; | |
3754 | ||
3755 | if (!ftrace_location(ip)) | |
3756 | return -EINVAL; | |
3757 | ||
3758 | if (remove) { | |
3759 | entry = ftrace_lookup_ip(hash, ip); | |
3760 | if (!entry) | |
3761 | return -ENOENT; | |
3762 | free_hash_entry(hash, entry); | |
3763 | return 0; | |
3764 | } | |
3765 | ||
3766 | return add_hash_entry(hash, ip); | |
3767 | } | |
3768 | ||
3769 | static void ftrace_ops_update_code(struct ftrace_ops *ops) | |
3770 | { | |
3771 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | |
3772 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | |
3773 | } | |
3774 | ||
3775 | static int | |
3776 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |
3777 | unsigned long ip, int remove, int reset, int enable) | |
3778 | { | |
3779 | struct ftrace_hash **orig_hash; | |
3780 | struct ftrace_hash *hash; | |
3781 | int ret; | |
3782 | ||
3783 | if (unlikely(ftrace_disabled)) | |
3784 | return -ENODEV; | |
3785 | ||
3786 | mutex_lock(&ops->func_hash->regex_lock); | |
3787 | ||
3788 | if (enable) | |
3789 | orig_hash = &ops->func_hash->filter_hash; | |
3790 | else | |
3791 | orig_hash = &ops->func_hash->notrace_hash; | |
3792 | ||
3793 | if (reset) | |
3794 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | |
3795 | else | |
3796 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | |
3797 | ||
3798 | if (!hash) { | |
3799 | ret = -ENOMEM; | |
3800 | goto out_regex_unlock; | |
3801 | } | |
3802 | ||
3803 | if (buf && !ftrace_match_records(hash, buf, len)) { | |
3804 | ret = -EINVAL; | |
3805 | goto out_regex_unlock; | |
3806 | } | |
3807 | if (ip) { | |
3808 | ret = ftrace_match_addr(hash, ip, remove); | |
3809 | if (ret < 0) | |
3810 | goto out_regex_unlock; | |
3811 | } | |
3812 | ||
3813 | mutex_lock(&ftrace_lock); | |
3814 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | |
3815 | if (!ret) | |
3816 | ftrace_ops_update_code(ops); | |
3817 | ||
3818 | mutex_unlock(&ftrace_lock); | |
3819 | ||
3820 | out_regex_unlock: | |
3821 | mutex_unlock(&ops->func_hash->regex_lock); | |
3822 | ||
3823 | free_ftrace_hash(hash); | |
3824 | return ret; | |
3825 | } | |
3826 | ||
3827 | static int | |
3828 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, | |
3829 | int reset, int enable) | |
3830 | { | |
3831 | return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); | |
3832 | } | |
3833 | ||
3834 | /** | |
3835 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address | |
3836 | * @ops - the ops to set the filter with | |
3837 | * @ip - the address to add to or remove from the filter. | |
3838 | * @remove - non zero to remove the ip from the filter | |
3839 | * @reset - non zero to reset all filters before applying this filter. | |
3840 | * | |
3841 | * Filters denote which functions should be enabled when tracing is enabled | |
3842 | * If @ip is NULL, it failes to update filter. | |
3843 | */ | |
3844 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, | |
3845 | int remove, int reset) | |
3846 | { | |
3847 | ftrace_ops_init(ops); | |
3848 | return ftrace_set_addr(ops, ip, remove, reset, 1); | |
3849 | } | |
3850 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); | |
3851 | ||
3852 | static int | |
3853 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | |
3854 | int reset, int enable) | |
3855 | { | |
3856 | return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); | |
3857 | } | |
3858 | ||
3859 | /** | |
3860 | * ftrace_set_filter - set a function to filter on in ftrace | |
3861 | * @ops - the ops to set the filter with | |
3862 | * @buf - the string that holds the function filter text. | |
3863 | * @len - the length of the string. | |
3864 | * @reset - non zero to reset all filters before applying this filter. | |
3865 | * | |
3866 | * Filters denote which functions should be enabled when tracing is enabled. | |
3867 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | |
3868 | */ | |
3869 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, | |
3870 | int len, int reset) | |
3871 | { | |
3872 | ftrace_ops_init(ops); | |
3873 | return ftrace_set_regex(ops, buf, len, reset, 1); | |
3874 | } | |
3875 | EXPORT_SYMBOL_GPL(ftrace_set_filter); | |
3876 | ||
3877 | /** | |
3878 | * ftrace_set_notrace - set a function to not trace in ftrace | |
3879 | * @ops - the ops to set the notrace filter with | |
3880 | * @buf - the string that holds the function notrace text. | |
3881 | * @len - the length of the string. | |
3882 | * @reset - non zero to reset all filters before applying this filter. | |
3883 | * | |
3884 | * Notrace Filters denote which functions should not be enabled when tracing | |
3885 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | |
3886 | * for tracing. | |
3887 | */ | |
3888 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, | |
3889 | int len, int reset) | |
3890 | { | |
3891 | ftrace_ops_init(ops); | |
3892 | return ftrace_set_regex(ops, buf, len, reset, 0); | |
3893 | } | |
3894 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); | |
3895 | /** | |
3896 | * ftrace_set_global_filter - set a function to filter on with global tracers | |
3897 | * @buf - the string that holds the function filter text. | |
3898 | * @len - the length of the string. | |
3899 | * @reset - non zero to reset all filters before applying this filter. | |
3900 | * | |
3901 | * Filters denote which functions should be enabled when tracing is enabled. | |
3902 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | |
3903 | */ | |
3904 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) | |
3905 | { | |
3906 | ftrace_set_regex(&global_ops, buf, len, reset, 1); | |
3907 | } | |
3908 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); | |
3909 | ||
3910 | /** | |
3911 | * ftrace_set_global_notrace - set a function to not trace with global tracers | |
3912 | * @buf - the string that holds the function notrace text. | |
3913 | * @len - the length of the string. | |
3914 | * @reset - non zero to reset all filters before applying this filter. | |
3915 | * | |
3916 | * Notrace Filters denote which functions should not be enabled when tracing | |
3917 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | |
3918 | * for tracing. | |
3919 | */ | |
3920 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) | |
3921 | { | |
3922 | ftrace_set_regex(&global_ops, buf, len, reset, 0); | |
3923 | } | |
3924 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); | |
3925 | ||
3926 | /* | |
3927 | * command line interface to allow users to set filters on boot up. | |
3928 | */ | |
3929 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE | |
3930 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | |
3931 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | |
3932 | ||
3933 | /* Used by function selftest to not test if filter is set */ | |
3934 | bool ftrace_filter_param __initdata; | |
3935 | ||
3936 | static int __init set_ftrace_notrace(char *str) | |
3937 | { | |
3938 | ftrace_filter_param = true; | |
3939 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); | |
3940 | return 1; | |
3941 | } | |
3942 | __setup("ftrace_notrace=", set_ftrace_notrace); | |
3943 | ||
3944 | static int __init set_ftrace_filter(char *str) | |
3945 | { | |
3946 | ftrace_filter_param = true; | |
3947 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); | |
3948 | return 1; | |
3949 | } | |
3950 | __setup("ftrace_filter=", set_ftrace_filter); | |
3951 | ||
3952 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
3953 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | |
3954 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | |
3955 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); | |
3956 | ||
3957 | static int __init set_graph_function(char *str) | |
3958 | { | |
3959 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); | |
3960 | return 1; | |
3961 | } | |
3962 | __setup("ftrace_graph_filter=", set_graph_function); | |
3963 | ||
3964 | static int __init set_graph_notrace_function(char *str) | |
3965 | { | |
3966 | strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); | |
3967 | return 1; | |
3968 | } | |
3969 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); | |
3970 | ||
3971 | static void __init set_ftrace_early_graph(char *buf, int enable) | |
3972 | { | |
3973 | int ret; | |
3974 | char *func; | |
3975 | unsigned long *table = ftrace_graph_funcs; | |
3976 | int *count = &ftrace_graph_count; | |
3977 | ||
3978 | if (!enable) { | |
3979 | table = ftrace_graph_notrace_funcs; | |
3980 | count = &ftrace_graph_notrace_count; | |
3981 | } | |
3982 | ||
3983 | while (buf) { | |
3984 | func = strsep(&buf, ","); | |
3985 | /* we allow only one expression at a time */ | |
3986 | ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func); | |
3987 | if (ret) | |
3988 | printk(KERN_DEBUG "ftrace: function %s not " | |
3989 | "traceable\n", func); | |
3990 | } | |
3991 | } | |
3992 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
3993 | ||
3994 | void __init | |
3995 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) | |
3996 | { | |
3997 | char *func; | |
3998 | ||
3999 | ftrace_ops_init(ops); | |
4000 | ||
4001 | while (buf) { | |
4002 | func = strsep(&buf, ","); | |
4003 | ftrace_set_regex(ops, func, strlen(func), 0, enable); | |
4004 | } | |
4005 | } | |
4006 | ||
4007 | static void __init set_ftrace_early_filters(void) | |
4008 | { | |
4009 | if (ftrace_filter_buf[0]) | |
4010 | ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); | |
4011 | if (ftrace_notrace_buf[0]) | |
4012 | ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); | |
4013 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
4014 | if (ftrace_graph_buf[0]) | |
4015 | set_ftrace_early_graph(ftrace_graph_buf, 1); | |
4016 | if (ftrace_graph_notrace_buf[0]) | |
4017 | set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); | |
4018 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
4019 | } | |
4020 | ||
4021 | int ftrace_regex_release(struct inode *inode, struct file *file) | |
4022 | { | |
4023 | struct seq_file *m = (struct seq_file *)file->private_data; | |
4024 | struct ftrace_iterator *iter; | |
4025 | struct ftrace_hash **orig_hash; | |
4026 | struct trace_parser *parser; | |
4027 | int filter_hash; | |
4028 | int ret; | |
4029 | ||
4030 | if (file->f_mode & FMODE_READ) { | |
4031 | iter = m->private; | |
4032 | seq_release(inode, file); | |
4033 | } else | |
4034 | iter = file->private_data; | |
4035 | ||
4036 | parser = &iter->parser; | |
4037 | if (trace_parser_loaded(parser)) { | |
4038 | parser->buffer[parser->idx] = 0; | |
4039 | ftrace_match_records(iter->hash, parser->buffer, parser->idx); | |
4040 | } | |
4041 | ||
4042 | trace_parser_put(parser); | |
4043 | ||
4044 | mutex_lock(&iter->ops->func_hash->regex_lock); | |
4045 | ||
4046 | if (file->f_mode & FMODE_WRITE) { | |
4047 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | |
4048 | ||
4049 | if (filter_hash) | |
4050 | orig_hash = &iter->ops->func_hash->filter_hash; | |
4051 | else | |
4052 | orig_hash = &iter->ops->func_hash->notrace_hash; | |
4053 | ||
4054 | mutex_lock(&ftrace_lock); | |
4055 | ret = ftrace_hash_move(iter->ops, filter_hash, | |
4056 | orig_hash, iter->hash); | |
4057 | if (!ret) | |
4058 | ftrace_ops_update_code(iter->ops); | |
4059 | ||
4060 | mutex_unlock(&ftrace_lock); | |
4061 | } | |
4062 | ||
4063 | mutex_unlock(&iter->ops->func_hash->regex_lock); | |
4064 | free_ftrace_hash(iter->hash); | |
4065 | kfree(iter); | |
4066 | ||
4067 | return 0; | |
4068 | } | |
4069 | ||
4070 | static const struct file_operations ftrace_avail_fops = { | |
4071 | .open = ftrace_avail_open, | |
4072 | .read = seq_read, | |
4073 | .llseek = seq_lseek, | |
4074 | .release = seq_release_private, | |
4075 | }; | |
4076 | ||
4077 | static const struct file_operations ftrace_enabled_fops = { | |
4078 | .open = ftrace_enabled_open, | |
4079 | .read = seq_read, | |
4080 | .llseek = seq_lseek, | |
4081 | .release = seq_release_private, | |
4082 | }; | |
4083 | ||
4084 | static const struct file_operations ftrace_filter_fops = { | |
4085 | .open = ftrace_filter_open, | |
4086 | .read = seq_read, | |
4087 | .write = ftrace_filter_write, | |
4088 | .llseek = tracing_lseek, | |
4089 | .release = ftrace_regex_release, | |
4090 | }; | |
4091 | ||
4092 | static const struct file_operations ftrace_notrace_fops = { | |
4093 | .open = ftrace_notrace_open, | |
4094 | .read = seq_read, | |
4095 | .write = ftrace_notrace_write, | |
4096 | .llseek = tracing_lseek, | |
4097 | .release = ftrace_regex_release, | |
4098 | }; | |
4099 | ||
4100 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
4101 | ||
4102 | static DEFINE_MUTEX(graph_lock); | |
4103 | ||
4104 | int ftrace_graph_count; | |
4105 | int ftrace_graph_notrace_count; | |
4106 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | |
4107 | unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | |
4108 | ||
4109 | struct ftrace_graph_data { | |
4110 | unsigned long *table; | |
4111 | size_t size; | |
4112 | int *count; | |
4113 | const struct seq_operations *seq_ops; | |
4114 | }; | |
4115 | ||
4116 | static void * | |
4117 | __g_next(struct seq_file *m, loff_t *pos) | |
4118 | { | |
4119 | struct ftrace_graph_data *fgd = m->private; | |
4120 | ||
4121 | if (*pos >= *fgd->count) | |
4122 | return NULL; | |
4123 | return &fgd->table[*pos]; | |
4124 | } | |
4125 | ||
4126 | static void * | |
4127 | g_next(struct seq_file *m, void *v, loff_t *pos) | |
4128 | { | |
4129 | (*pos)++; | |
4130 | return __g_next(m, pos); | |
4131 | } | |
4132 | ||
4133 | static void *g_start(struct seq_file *m, loff_t *pos) | |
4134 | { | |
4135 | struct ftrace_graph_data *fgd = m->private; | |
4136 | ||
4137 | mutex_lock(&graph_lock); | |
4138 | ||
4139 | /* Nothing, tell g_show to print all functions are enabled */ | |
4140 | if (!*fgd->count && !*pos) | |
4141 | return (void *)1; | |
4142 | ||
4143 | return __g_next(m, pos); | |
4144 | } | |
4145 | ||
4146 | static void g_stop(struct seq_file *m, void *p) | |
4147 | { | |
4148 | mutex_unlock(&graph_lock); | |
4149 | } | |
4150 | ||
4151 | static int g_show(struct seq_file *m, void *v) | |
4152 | { | |
4153 | unsigned long *ptr = v; | |
4154 | ||
4155 | if (!ptr) | |
4156 | return 0; | |
4157 | ||
4158 | if (ptr == (unsigned long *)1) { | |
4159 | struct ftrace_graph_data *fgd = m->private; | |
4160 | ||
4161 | if (fgd->table == ftrace_graph_funcs) | |
4162 | seq_printf(m, "#### all functions enabled ####\n"); | |
4163 | else | |
4164 | seq_printf(m, "#### no functions disabled ####\n"); | |
4165 | return 0; | |
4166 | } | |
4167 | ||
4168 | seq_printf(m, "%ps\n", (void *)*ptr); | |
4169 | ||
4170 | return 0; | |
4171 | } | |
4172 | ||
4173 | static const struct seq_operations ftrace_graph_seq_ops = { | |
4174 | .start = g_start, | |
4175 | .next = g_next, | |
4176 | .stop = g_stop, | |
4177 | .show = g_show, | |
4178 | }; | |
4179 | ||
4180 | static int | |
4181 | __ftrace_graph_open(struct inode *inode, struct file *file, | |
4182 | struct ftrace_graph_data *fgd) | |
4183 | { | |
4184 | int ret = 0; | |
4185 | ||
4186 | mutex_lock(&graph_lock); | |
4187 | if ((file->f_mode & FMODE_WRITE) && | |
4188 | (file->f_flags & O_TRUNC)) { | |
4189 | *fgd->count = 0; | |
4190 | memset(fgd->table, 0, fgd->size * sizeof(*fgd->table)); | |
4191 | } | |
4192 | mutex_unlock(&graph_lock); | |
4193 | ||
4194 | if (file->f_mode & FMODE_READ) { | |
4195 | ret = seq_open(file, fgd->seq_ops); | |
4196 | if (!ret) { | |
4197 | struct seq_file *m = file->private_data; | |
4198 | m->private = fgd; | |
4199 | } | |
4200 | } else | |
4201 | file->private_data = fgd; | |
4202 | ||
4203 | return ret; | |
4204 | } | |
4205 | ||
4206 | static int | |
4207 | ftrace_graph_open(struct inode *inode, struct file *file) | |
4208 | { | |
4209 | struct ftrace_graph_data *fgd; | |
4210 | ||
4211 | if (unlikely(ftrace_disabled)) | |
4212 | return -ENODEV; | |
4213 | ||
4214 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | |
4215 | if (fgd == NULL) | |
4216 | return -ENOMEM; | |
4217 | ||
4218 | fgd->table = ftrace_graph_funcs; | |
4219 | fgd->size = FTRACE_GRAPH_MAX_FUNCS; | |
4220 | fgd->count = &ftrace_graph_count; | |
4221 | fgd->seq_ops = &ftrace_graph_seq_ops; | |
4222 | ||
4223 | return __ftrace_graph_open(inode, file, fgd); | |
4224 | } | |
4225 | ||
4226 | static int | |
4227 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) | |
4228 | { | |
4229 | struct ftrace_graph_data *fgd; | |
4230 | ||
4231 | if (unlikely(ftrace_disabled)) | |
4232 | return -ENODEV; | |
4233 | ||
4234 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | |
4235 | if (fgd == NULL) | |
4236 | return -ENOMEM; | |
4237 | ||
4238 | fgd->table = ftrace_graph_notrace_funcs; | |
4239 | fgd->size = FTRACE_GRAPH_MAX_FUNCS; | |
4240 | fgd->count = &ftrace_graph_notrace_count; | |
4241 | fgd->seq_ops = &ftrace_graph_seq_ops; | |
4242 | ||
4243 | return __ftrace_graph_open(inode, file, fgd); | |
4244 | } | |
4245 | ||
4246 | static int | |
4247 | ftrace_graph_release(struct inode *inode, struct file *file) | |
4248 | { | |
4249 | if (file->f_mode & FMODE_READ) { | |
4250 | struct seq_file *m = file->private_data; | |
4251 | ||
4252 | kfree(m->private); | |
4253 | seq_release(inode, file); | |
4254 | } else { | |
4255 | kfree(file->private_data); | |
4256 | } | |
4257 | ||
4258 | return 0; | |
4259 | } | |
4260 | ||
4261 | static int | |
4262 | ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) | |
4263 | { | |
4264 | struct dyn_ftrace *rec; | |
4265 | struct ftrace_page *pg; | |
4266 | int search_len; | |
4267 | int fail = 1; | |
4268 | int type, not; | |
4269 | char *search; | |
4270 | bool exists; | |
4271 | int i; | |
4272 | ||
4273 | /* decode regex */ | |
4274 | type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); | |
4275 | if (!not && *idx >= size) | |
4276 | return -EBUSY; | |
4277 | ||
4278 | search_len = strlen(search); | |
4279 | ||
4280 | mutex_lock(&ftrace_lock); | |
4281 | ||
4282 | if (unlikely(ftrace_disabled)) { | |
4283 | mutex_unlock(&ftrace_lock); | |
4284 | return -ENODEV; | |
4285 | } | |
4286 | ||
4287 | do_for_each_ftrace_rec(pg, rec) { | |
4288 | ||
4289 | if (ftrace_match_record(rec, NULL, search, search_len, type)) { | |
4290 | /* if it is in the array */ | |
4291 | exists = false; | |
4292 | for (i = 0; i < *idx; i++) { | |
4293 | if (array[i] == rec->ip) { | |
4294 | exists = true; | |
4295 | break; | |
4296 | } | |
4297 | } | |
4298 | ||
4299 | if (!not) { | |
4300 | fail = 0; | |
4301 | if (!exists) { | |
4302 | array[(*idx)++] = rec->ip; | |
4303 | if (*idx >= size) | |
4304 | goto out; | |
4305 | } | |
4306 | } else { | |
4307 | if (exists) { | |
4308 | array[i] = array[--(*idx)]; | |
4309 | array[*idx] = 0; | |
4310 | fail = 0; | |
4311 | } | |
4312 | } | |
4313 | } | |
4314 | } while_for_each_ftrace_rec(); | |
4315 | out: | |
4316 | mutex_unlock(&ftrace_lock); | |
4317 | ||
4318 | if (fail) | |
4319 | return -EINVAL; | |
4320 | ||
4321 | return 0; | |
4322 | } | |
4323 | ||
4324 | static ssize_t | |
4325 | ftrace_graph_write(struct file *file, const char __user *ubuf, | |
4326 | size_t cnt, loff_t *ppos) | |
4327 | { | |
4328 | struct trace_parser parser; | |
4329 | ssize_t read, ret = 0; | |
4330 | struct ftrace_graph_data *fgd = file->private_data; | |
4331 | ||
4332 | if (!cnt) | |
4333 | return 0; | |
4334 | ||
4335 | if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) | |
4336 | return -ENOMEM; | |
4337 | ||
4338 | read = trace_get_user(&parser, ubuf, cnt, ppos); | |
4339 | ||
4340 | if (read >= 0 && trace_parser_loaded((&parser))) { | |
4341 | parser.buffer[parser.idx] = 0; | |
4342 | ||
4343 | mutex_lock(&graph_lock); | |
4344 | ||
4345 | /* we allow only one expression at a time */ | |
4346 | ret = ftrace_set_func(fgd->table, fgd->count, fgd->size, | |
4347 | parser.buffer); | |
4348 | ||
4349 | mutex_unlock(&graph_lock); | |
4350 | } | |
4351 | ||
4352 | if (!ret) | |
4353 | ret = read; | |
4354 | ||
4355 | trace_parser_put(&parser); | |
4356 | ||
4357 | return ret; | |
4358 | } | |
4359 | ||
4360 | static const struct file_operations ftrace_graph_fops = { | |
4361 | .open = ftrace_graph_open, | |
4362 | .read = seq_read, | |
4363 | .write = ftrace_graph_write, | |
4364 | .llseek = tracing_lseek, | |
4365 | .release = ftrace_graph_release, | |
4366 | }; | |
4367 | ||
4368 | static const struct file_operations ftrace_graph_notrace_fops = { | |
4369 | .open = ftrace_graph_notrace_open, | |
4370 | .read = seq_read, | |
4371 | .write = ftrace_graph_write, | |
4372 | .llseek = tracing_lseek, | |
4373 | .release = ftrace_graph_release, | |
4374 | }; | |
4375 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
4376 | ||
4377 | void ftrace_create_filter_files(struct ftrace_ops *ops, | |
4378 | struct dentry *parent) | |
4379 | { | |
4380 | ||
4381 | trace_create_file("set_ftrace_filter", 0644, parent, | |
4382 | ops, &ftrace_filter_fops); | |
4383 | ||
4384 | trace_create_file("set_ftrace_notrace", 0644, parent, | |
4385 | ops, &ftrace_notrace_fops); | |
4386 | } | |
4387 | ||
4388 | /* | |
4389 | * The name "destroy_filter_files" is really a misnomer. Although | |
4390 | * in the future, it may actualy delete the files, but this is | |
4391 | * really intended to make sure the ops passed in are disabled | |
4392 | * and that when this function returns, the caller is free to | |
4393 | * free the ops. | |
4394 | * | |
4395 | * The "destroy" name is only to match the "create" name that this | |
4396 | * should be paired with. | |
4397 | */ | |
4398 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) | |
4399 | { | |
4400 | mutex_lock(&ftrace_lock); | |
4401 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | |
4402 | ftrace_shutdown(ops, 0); | |
4403 | ops->flags |= FTRACE_OPS_FL_DELETED; | |
4404 | mutex_unlock(&ftrace_lock); | |
4405 | } | |
4406 | ||
4407 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |
4408 | { | |
4409 | ||
4410 | trace_create_file("available_filter_functions", 0444, | |
4411 | d_tracer, NULL, &ftrace_avail_fops); | |
4412 | ||
4413 | trace_create_file("enabled_functions", 0444, | |
4414 | d_tracer, NULL, &ftrace_enabled_fops); | |
4415 | ||
4416 | ftrace_create_filter_files(&global_ops, d_tracer); | |
4417 | ||
4418 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
4419 | trace_create_file("set_graph_function", 0444, d_tracer, | |
4420 | NULL, | |
4421 | &ftrace_graph_fops); | |
4422 | trace_create_file("set_graph_notrace", 0444, d_tracer, | |
4423 | NULL, | |
4424 | &ftrace_graph_notrace_fops); | |
4425 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
4426 | ||
4427 | return 0; | |
4428 | } | |
4429 | ||
4430 | static int ftrace_cmp_ips(const void *a, const void *b) | |
4431 | { | |
4432 | const unsigned long *ipa = a; | |
4433 | const unsigned long *ipb = b; | |
4434 | ||
4435 | if (*ipa > *ipb) | |
4436 | return 1; | |
4437 | if (*ipa < *ipb) | |
4438 | return -1; | |
4439 | return 0; | |
4440 | } | |
4441 | ||
4442 | static void ftrace_swap_ips(void *a, void *b, int size) | |
4443 | { | |
4444 | unsigned long *ipa = a; | |
4445 | unsigned long *ipb = b; | |
4446 | unsigned long t; | |
4447 | ||
4448 | t = *ipa; | |
4449 | *ipa = *ipb; | |
4450 | *ipb = t; | |
4451 | } | |
4452 | ||
4453 | static int ftrace_process_locs(struct module *mod, | |
4454 | unsigned long *start, | |
4455 | unsigned long *end) | |
4456 | { | |
4457 | struct ftrace_page *start_pg; | |
4458 | struct ftrace_page *pg; | |
4459 | struct dyn_ftrace *rec; | |
4460 | unsigned long count; | |
4461 | unsigned long *p; | |
4462 | unsigned long addr; | |
4463 | unsigned long flags = 0; /* Shut up gcc */ | |
4464 | int ret = -ENOMEM; | |
4465 | ||
4466 | count = end - start; | |
4467 | ||
4468 | if (!count) | |
4469 | return 0; | |
4470 | ||
4471 | sort(start, count, sizeof(*start), | |
4472 | ftrace_cmp_ips, ftrace_swap_ips); | |
4473 | ||
4474 | start_pg = ftrace_allocate_pages(count); | |
4475 | if (!start_pg) | |
4476 | return -ENOMEM; | |
4477 | ||
4478 | mutex_lock(&ftrace_lock); | |
4479 | ||
4480 | /* | |
4481 | * Core and each module needs their own pages, as | |
4482 | * modules will free them when they are removed. | |
4483 | * Force a new page to be allocated for modules. | |
4484 | */ | |
4485 | if (!mod) { | |
4486 | WARN_ON(ftrace_pages || ftrace_pages_start); | |
4487 | /* First initialization */ | |
4488 | ftrace_pages = ftrace_pages_start = start_pg; | |
4489 | } else { | |
4490 | if (!ftrace_pages) | |
4491 | goto out; | |
4492 | ||
4493 | if (WARN_ON(ftrace_pages->next)) { | |
4494 | /* Hmm, we have free pages? */ | |
4495 | while (ftrace_pages->next) | |
4496 | ftrace_pages = ftrace_pages->next; | |
4497 | } | |
4498 | ||
4499 | ftrace_pages->next = start_pg; | |
4500 | } | |
4501 | ||
4502 | p = start; | |
4503 | pg = start_pg; | |
4504 | while (p < end) { | |
4505 | addr = ftrace_call_adjust(*p++); | |
4506 | /* | |
4507 | * Some architecture linkers will pad between | |
4508 | * the different mcount_loc sections of different | |
4509 | * object files to satisfy alignments. | |
4510 | * Skip any NULL pointers. | |
4511 | */ | |
4512 | if (!addr) | |
4513 | continue; | |
4514 | ||
4515 | if (pg->index == pg->size) { | |
4516 | /* We should have allocated enough */ | |
4517 | if (WARN_ON(!pg->next)) | |
4518 | break; | |
4519 | pg = pg->next; | |
4520 | } | |
4521 | ||
4522 | rec = &pg->records[pg->index++]; | |
4523 | rec->ip = addr; | |
4524 | } | |
4525 | ||
4526 | /* We should have used all pages */ | |
4527 | WARN_ON(pg->next); | |
4528 | ||
4529 | /* Assign the last page to ftrace_pages */ | |
4530 | ftrace_pages = pg; | |
4531 | ||
4532 | /* | |
4533 | * We only need to disable interrupts on start up | |
4534 | * because we are modifying code that an interrupt | |
4535 | * may execute, and the modification is not atomic. | |
4536 | * But for modules, nothing runs the code we modify | |
4537 | * until we are finished with it, and there's no | |
4538 | * reason to cause large interrupt latencies while we do it. | |
4539 | */ | |
4540 | if (!mod) | |
4541 | local_irq_save(flags); | |
4542 | ftrace_update_code(mod, start_pg); | |
4543 | if (!mod) | |
4544 | local_irq_restore(flags); | |
4545 | ret = 0; | |
4546 | out: | |
4547 | mutex_unlock(&ftrace_lock); | |
4548 | ||
4549 | return ret; | |
4550 | } | |
4551 | ||
4552 | #ifdef CONFIG_MODULES | |
4553 | ||
4554 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) | |
4555 | ||
4556 | void ftrace_release_mod(struct module *mod) | |
4557 | { | |
4558 | struct dyn_ftrace *rec; | |
4559 | struct ftrace_page **last_pg; | |
4560 | struct ftrace_page *pg; | |
4561 | int order; | |
4562 | ||
4563 | mutex_lock(&ftrace_lock); | |
4564 | ||
4565 | if (ftrace_disabled) | |
4566 | goto out_unlock; | |
4567 | ||
4568 | /* | |
4569 | * Each module has its own ftrace_pages, remove | |
4570 | * them from the list. | |
4571 | */ | |
4572 | last_pg = &ftrace_pages_start; | |
4573 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { | |
4574 | rec = &pg->records[0]; | |
4575 | if (within_module_core(rec->ip, mod)) { | |
4576 | /* | |
4577 | * As core pages are first, the first | |
4578 | * page should never be a module page. | |
4579 | */ | |
4580 | if (WARN_ON(pg == ftrace_pages_start)) | |
4581 | goto out_unlock; | |
4582 | ||
4583 | /* Check if we are deleting the last page */ | |
4584 | if (pg == ftrace_pages) | |
4585 | ftrace_pages = next_to_ftrace_page(last_pg); | |
4586 | ||
4587 | *last_pg = pg->next; | |
4588 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | |
4589 | free_pages((unsigned long)pg->records, order); | |
4590 | kfree(pg); | |
4591 | } else | |
4592 | last_pg = &pg->next; | |
4593 | } | |
4594 | out_unlock: | |
4595 | mutex_unlock(&ftrace_lock); | |
4596 | } | |
4597 | ||
4598 | static void ftrace_init_module(struct module *mod, | |
4599 | unsigned long *start, unsigned long *end) | |
4600 | { | |
4601 | if (ftrace_disabled || start == end) | |
4602 | return; | |
4603 | ftrace_process_locs(mod, start, end); | |
4604 | } | |
4605 | ||
4606 | void ftrace_module_init(struct module *mod) | |
4607 | { | |
4608 | ftrace_init_module(mod, mod->ftrace_callsites, | |
4609 | mod->ftrace_callsites + | |
4610 | mod->num_ftrace_callsites); | |
4611 | } | |
4612 | ||
4613 | static int ftrace_module_notify_exit(struct notifier_block *self, | |
4614 | unsigned long val, void *data) | |
4615 | { | |
4616 | struct module *mod = data; | |
4617 | ||
4618 | if (val == MODULE_STATE_GOING) | |
4619 | ftrace_release_mod(mod); | |
4620 | ||
4621 | return 0; | |
4622 | } | |
4623 | #else | |
4624 | static int ftrace_module_notify_exit(struct notifier_block *self, | |
4625 | unsigned long val, void *data) | |
4626 | { | |
4627 | return 0; | |
4628 | } | |
4629 | #endif /* CONFIG_MODULES */ | |
4630 | ||
4631 | struct notifier_block ftrace_module_exit_nb = { | |
4632 | .notifier_call = ftrace_module_notify_exit, | |
4633 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ | |
4634 | }; | |
4635 | ||
4636 | void __init ftrace_init(void) | |
4637 | { | |
4638 | extern unsigned long __start_mcount_loc[]; | |
4639 | extern unsigned long __stop_mcount_loc[]; | |
4640 | unsigned long count, flags; | |
4641 | int ret; | |
4642 | ||
4643 | local_irq_save(flags); | |
4644 | ret = ftrace_dyn_arch_init(); | |
4645 | local_irq_restore(flags); | |
4646 | if (ret) | |
4647 | goto failed; | |
4648 | ||
4649 | count = __stop_mcount_loc - __start_mcount_loc; | |
4650 | if (!count) { | |
4651 | pr_info("ftrace: No functions to be traced?\n"); | |
4652 | goto failed; | |
4653 | } | |
4654 | ||
4655 | pr_info("ftrace: allocating %ld entries in %ld pages\n", | |
4656 | count, count / ENTRIES_PER_PAGE + 1); | |
4657 | ||
4658 | last_ftrace_enabled = ftrace_enabled = 1; | |
4659 | ||
4660 | ret = ftrace_process_locs(NULL, | |
4661 | __start_mcount_loc, | |
4662 | __stop_mcount_loc); | |
4663 | ||
4664 | ret = register_module_notifier(&ftrace_module_exit_nb); | |
4665 | if (ret) | |
4666 | pr_warning("Failed to register trace ftrace module exit notifier\n"); | |
4667 | ||
4668 | set_ftrace_early_filters(); | |
4669 | ||
4670 | return; | |
4671 | failed: | |
4672 | ftrace_disabled = 1; | |
4673 | } | |
4674 | ||
4675 | #else | |
4676 | ||
4677 | static struct ftrace_ops global_ops = { | |
4678 | .func = ftrace_stub, | |
4679 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | |
4680 | }; | |
4681 | ||
4682 | static int __init ftrace_nodyn_init(void) | |
4683 | { | |
4684 | ftrace_enabled = 1; | |
4685 | return 0; | |
4686 | } | |
4687 | core_initcall(ftrace_nodyn_init); | |
4688 | ||
4689 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | |
4690 | static inline void ftrace_startup_enable(int command) { } | |
4691 | /* Keep as macros so we do not need to define the commands */ | |
4692 | # define ftrace_startup(ops, command) \ | |
4693 | ({ \ | |
4694 | int ___ret = __register_ftrace_function(ops); \ | |
4695 | if (!___ret) \ | |
4696 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ | |
4697 | ___ret; \ | |
4698 | }) | |
4699 | # define ftrace_shutdown(ops, command) \ | |
4700 | ({ \ | |
4701 | int ___ret = __unregister_ftrace_function(ops); \ | |
4702 | if (!___ret) \ | |
4703 | (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ | |
4704 | ___ret; \ | |
4705 | }) | |
4706 | ||
4707 | # define ftrace_startup_sysctl() do { } while (0) | |
4708 | # define ftrace_shutdown_sysctl() do { } while (0) | |
4709 | ||
4710 | static inline int | |
4711 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |
4712 | { | |
4713 | return 1; | |
4714 | } | |
4715 | ||
4716 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
4717 | ||
4718 | __init void ftrace_init_global_array_ops(struct trace_array *tr) | |
4719 | { | |
4720 | tr->ops = &global_ops; | |
4721 | tr->ops->private = tr; | |
4722 | } | |
4723 | ||
4724 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) | |
4725 | { | |
4726 | /* If we filter on pids, update to use the pid function */ | |
4727 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | |
4728 | if (WARN_ON(tr->ops->func != ftrace_stub)) | |
4729 | printk("ftrace ops had %pS for function\n", | |
4730 | tr->ops->func); | |
4731 | /* Only the top level instance does pid tracing */ | |
4732 | if (!list_empty(&ftrace_pids)) { | |
4733 | set_ftrace_pid_function(func); | |
4734 | func = ftrace_pid_func; | |
4735 | } | |
4736 | } | |
4737 | tr->ops->func = func; | |
4738 | tr->ops->private = tr; | |
4739 | } | |
4740 | ||
4741 | void ftrace_reset_array_ops(struct trace_array *tr) | |
4742 | { | |
4743 | tr->ops->func = ftrace_stub; | |
4744 | } | |
4745 | ||
4746 | static void | |
4747 | ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |
4748 | struct ftrace_ops *op, struct pt_regs *regs) | |
4749 | { | |
4750 | if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) | |
4751 | return; | |
4752 | ||
4753 | /* | |
4754 | * Some of the ops may be dynamically allocated, | |
4755 | * they must be freed after a synchronize_sched(). | |
4756 | */ | |
4757 | preempt_disable_notrace(); | |
4758 | trace_recursion_set(TRACE_CONTROL_BIT); | |
4759 | ||
4760 | /* | |
4761 | * Control funcs (perf) uses RCU. Only trace if | |
4762 | * RCU is currently active. | |
4763 | */ | |
4764 | if (!rcu_is_watching()) | |
4765 | goto out; | |
4766 | ||
4767 | do_for_each_ftrace_op(op, ftrace_control_list) { | |
4768 | if (!(op->flags & FTRACE_OPS_FL_STUB) && | |
4769 | !ftrace_function_local_disabled(op) && | |
4770 | ftrace_ops_test(op, ip, regs)) | |
4771 | op->func(ip, parent_ip, op, regs); | |
4772 | } while_for_each_ftrace_op(op); | |
4773 | out: | |
4774 | trace_recursion_clear(TRACE_CONTROL_BIT); | |
4775 | preempt_enable_notrace(); | |
4776 | } | |
4777 | ||
4778 | static struct ftrace_ops control_ops = { | |
4779 | .func = ftrace_ops_control_func, | |
4780 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | |
4781 | INIT_OPS_HASH(control_ops) | |
4782 | }; | |
4783 | ||
4784 | static inline void | |
4785 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |
4786 | struct ftrace_ops *ignored, struct pt_regs *regs) | |
4787 | { | |
4788 | struct ftrace_ops *op; | |
4789 | int bit; | |
4790 | ||
4791 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); | |
4792 | if (bit < 0) | |
4793 | return; | |
4794 | ||
4795 | /* | |
4796 | * Some of the ops may be dynamically allocated, | |
4797 | * they must be freed after a synchronize_sched(). | |
4798 | */ | |
4799 | preempt_disable_notrace(); | |
4800 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
4801 | if (ftrace_ops_test(op, ip, regs)) { | |
4802 | if (FTRACE_WARN_ON(!op->func)) { | |
4803 | pr_warn("op=%p %pS\n", op, op); | |
4804 | goto out; | |
4805 | } | |
4806 | op->func(ip, parent_ip, op, regs); | |
4807 | } | |
4808 | } while_for_each_ftrace_op(op); | |
4809 | out: | |
4810 | preempt_enable_notrace(); | |
4811 | trace_clear_recursion(bit); | |
4812 | } | |
4813 | ||
4814 | /* | |
4815 | * Some archs only support passing ip and parent_ip. Even though | |
4816 | * the list function ignores the op parameter, we do not want any | |
4817 | * C side effects, where a function is called without the caller | |
4818 | * sending a third parameter. | |
4819 | * Archs are to support both the regs and ftrace_ops at the same time. | |
4820 | * If they support ftrace_ops, it is assumed they support regs. | |
4821 | * If call backs want to use regs, they must either check for regs | |
4822 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. | |
4823 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. | |
4824 | * An architecture can pass partial regs with ftrace_ops and still | |
4825 | * set the ARCH_SUPPORT_FTARCE_OPS. | |
4826 | */ | |
4827 | #if ARCH_SUPPORTS_FTRACE_OPS | |
4828 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |
4829 | struct ftrace_ops *op, struct pt_regs *regs) | |
4830 | { | |
4831 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); | |
4832 | } | |
4833 | #else | |
4834 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | |
4835 | { | |
4836 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); | |
4837 | } | |
4838 | #endif | |
4839 | ||
4840 | /* | |
4841 | * If there's only one function registered but it does not support | |
4842 | * recursion, this function will be called by the mcount trampoline. | |
4843 | * This function will handle recursion protection. | |
4844 | */ | |
4845 | static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | |
4846 | struct ftrace_ops *op, struct pt_regs *regs) | |
4847 | { | |
4848 | int bit; | |
4849 | ||
4850 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); | |
4851 | if (bit < 0) | |
4852 | return; | |
4853 | ||
4854 | op->func(ip, parent_ip, op, regs); | |
4855 | ||
4856 | trace_clear_recursion(bit); | |
4857 | } | |
4858 | ||
4859 | static void clear_ftrace_swapper(void) | |
4860 | { | |
4861 | struct task_struct *p; | |
4862 | int cpu; | |
4863 | ||
4864 | get_online_cpus(); | |
4865 | for_each_online_cpu(cpu) { | |
4866 | p = idle_task(cpu); | |
4867 | clear_tsk_trace_trace(p); | |
4868 | } | |
4869 | put_online_cpus(); | |
4870 | } | |
4871 | ||
4872 | static void set_ftrace_swapper(void) | |
4873 | { | |
4874 | struct task_struct *p; | |
4875 | int cpu; | |
4876 | ||
4877 | get_online_cpus(); | |
4878 | for_each_online_cpu(cpu) { | |
4879 | p = idle_task(cpu); | |
4880 | set_tsk_trace_trace(p); | |
4881 | } | |
4882 | put_online_cpus(); | |
4883 | } | |
4884 | ||
4885 | static void clear_ftrace_pid(struct pid *pid) | |
4886 | { | |
4887 | struct task_struct *p; | |
4888 | ||
4889 | rcu_read_lock(); | |
4890 | do_each_pid_task(pid, PIDTYPE_PID, p) { | |
4891 | clear_tsk_trace_trace(p); | |
4892 | } while_each_pid_task(pid, PIDTYPE_PID, p); | |
4893 | rcu_read_unlock(); | |
4894 | ||
4895 | put_pid(pid); | |
4896 | } | |
4897 | ||
4898 | static void set_ftrace_pid(struct pid *pid) | |
4899 | { | |
4900 | struct task_struct *p; | |
4901 | ||
4902 | rcu_read_lock(); | |
4903 | do_each_pid_task(pid, PIDTYPE_PID, p) { | |
4904 | set_tsk_trace_trace(p); | |
4905 | } while_each_pid_task(pid, PIDTYPE_PID, p); | |
4906 | rcu_read_unlock(); | |
4907 | } | |
4908 | ||
4909 | static void clear_ftrace_pid_task(struct pid *pid) | |
4910 | { | |
4911 | if (pid == ftrace_swapper_pid) | |
4912 | clear_ftrace_swapper(); | |
4913 | else | |
4914 | clear_ftrace_pid(pid); | |
4915 | } | |
4916 | ||
4917 | static void set_ftrace_pid_task(struct pid *pid) | |
4918 | { | |
4919 | if (pid == ftrace_swapper_pid) | |
4920 | set_ftrace_swapper(); | |
4921 | else | |
4922 | set_ftrace_pid(pid); | |
4923 | } | |
4924 | ||
4925 | static int ftrace_pid_add(int p) | |
4926 | { | |
4927 | struct pid *pid; | |
4928 | struct ftrace_pid *fpid; | |
4929 | int ret = -EINVAL; | |
4930 | ||
4931 | mutex_lock(&ftrace_lock); | |
4932 | ||
4933 | if (!p) | |
4934 | pid = ftrace_swapper_pid; | |
4935 | else | |
4936 | pid = find_get_pid(p); | |
4937 | ||
4938 | if (!pid) | |
4939 | goto out; | |
4940 | ||
4941 | ret = 0; | |
4942 | ||
4943 | list_for_each_entry(fpid, &ftrace_pids, list) | |
4944 | if (fpid->pid == pid) | |
4945 | goto out_put; | |
4946 | ||
4947 | ret = -ENOMEM; | |
4948 | ||
4949 | fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); | |
4950 | if (!fpid) | |
4951 | goto out_put; | |
4952 | ||
4953 | list_add(&fpid->list, &ftrace_pids); | |
4954 | fpid->pid = pid; | |
4955 | ||
4956 | set_ftrace_pid_task(pid); | |
4957 | ||
4958 | ftrace_update_pid_func(); | |
4959 | ftrace_startup_enable(0); | |
4960 | ||
4961 | mutex_unlock(&ftrace_lock); | |
4962 | return 0; | |
4963 | ||
4964 | out_put: | |
4965 | if (pid != ftrace_swapper_pid) | |
4966 | put_pid(pid); | |
4967 | ||
4968 | out: | |
4969 | mutex_unlock(&ftrace_lock); | |
4970 | return ret; | |
4971 | } | |
4972 | ||
4973 | static void ftrace_pid_reset(void) | |
4974 | { | |
4975 | struct ftrace_pid *fpid, *safe; | |
4976 | ||
4977 | mutex_lock(&ftrace_lock); | |
4978 | list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { | |
4979 | struct pid *pid = fpid->pid; | |
4980 | ||
4981 | clear_ftrace_pid_task(pid); | |
4982 | ||
4983 | list_del(&fpid->list); | |
4984 | kfree(fpid); | |
4985 | } | |
4986 | ||
4987 | ftrace_update_pid_func(); | |
4988 | ftrace_startup_enable(0); | |
4989 | ||
4990 | mutex_unlock(&ftrace_lock); | |
4991 | } | |
4992 | ||
4993 | static void *fpid_start(struct seq_file *m, loff_t *pos) | |
4994 | { | |
4995 | mutex_lock(&ftrace_lock); | |
4996 | ||
4997 | if (list_empty(&ftrace_pids) && (!*pos)) | |
4998 | return (void *) 1; | |
4999 | ||
5000 | return seq_list_start(&ftrace_pids, *pos); | |
5001 | } | |
5002 | ||
5003 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) | |
5004 | { | |
5005 | if (v == (void *)1) | |
5006 | return NULL; | |
5007 | ||
5008 | return seq_list_next(v, &ftrace_pids, pos); | |
5009 | } | |
5010 | ||
5011 | static void fpid_stop(struct seq_file *m, void *p) | |
5012 | { | |
5013 | mutex_unlock(&ftrace_lock); | |
5014 | } | |
5015 | ||
5016 | static int fpid_show(struct seq_file *m, void *v) | |
5017 | { | |
5018 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); | |
5019 | ||
5020 | if (v == (void *)1) { | |
5021 | seq_printf(m, "no pid\n"); | |
5022 | return 0; | |
5023 | } | |
5024 | ||
5025 | if (fpid->pid == ftrace_swapper_pid) | |
5026 | seq_printf(m, "swapper tasks\n"); | |
5027 | else | |
5028 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); | |
5029 | ||
5030 | return 0; | |
5031 | } | |
5032 | ||
5033 | static const struct seq_operations ftrace_pid_sops = { | |
5034 | .start = fpid_start, | |
5035 | .next = fpid_next, | |
5036 | .stop = fpid_stop, | |
5037 | .show = fpid_show, | |
5038 | }; | |
5039 | ||
5040 | static int | |
5041 | ftrace_pid_open(struct inode *inode, struct file *file) | |
5042 | { | |
5043 | int ret = 0; | |
5044 | ||
5045 | if ((file->f_mode & FMODE_WRITE) && | |
5046 | (file->f_flags & O_TRUNC)) | |
5047 | ftrace_pid_reset(); | |
5048 | ||
5049 | if (file->f_mode & FMODE_READ) | |
5050 | ret = seq_open(file, &ftrace_pid_sops); | |
5051 | ||
5052 | return ret; | |
5053 | } | |
5054 | ||
5055 | static ssize_t | |
5056 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | |
5057 | size_t cnt, loff_t *ppos) | |
5058 | { | |
5059 | char buf[64], *tmp; | |
5060 | long val; | |
5061 | int ret; | |
5062 | ||
5063 | if (cnt >= sizeof(buf)) | |
5064 | return -EINVAL; | |
5065 | ||
5066 | if (copy_from_user(&buf, ubuf, cnt)) | |
5067 | return -EFAULT; | |
5068 | ||
5069 | buf[cnt] = 0; | |
5070 | ||
5071 | /* | |
5072 | * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" | |
5073 | * to clean the filter quietly. | |
5074 | */ | |
5075 | tmp = strstrip(buf); | |
5076 | if (strlen(tmp) == 0) | |
5077 | return 1; | |
5078 | ||
5079 | ret = kstrtol(tmp, 10, &val); | |
5080 | if (ret < 0) | |
5081 | return ret; | |
5082 | ||
5083 | ret = ftrace_pid_add(val); | |
5084 | ||
5085 | return ret ? ret : cnt; | |
5086 | } | |
5087 | ||
5088 | static int | |
5089 | ftrace_pid_release(struct inode *inode, struct file *file) | |
5090 | { | |
5091 | if (file->f_mode & FMODE_READ) | |
5092 | seq_release(inode, file); | |
5093 | ||
5094 | return 0; | |
5095 | } | |
5096 | ||
5097 | static const struct file_operations ftrace_pid_fops = { | |
5098 | .open = ftrace_pid_open, | |
5099 | .write = ftrace_pid_write, | |
5100 | .read = seq_read, | |
5101 | .llseek = tracing_lseek, | |
5102 | .release = ftrace_pid_release, | |
5103 | }; | |
5104 | ||
5105 | static __init int ftrace_init_debugfs(void) | |
5106 | { | |
5107 | struct dentry *d_tracer; | |
5108 | ||
5109 | d_tracer = tracing_init_dentry(); | |
5110 | if (!d_tracer) | |
5111 | return 0; | |
5112 | ||
5113 | ftrace_init_dyn_debugfs(d_tracer); | |
5114 | ||
5115 | trace_create_file("set_ftrace_pid", 0644, d_tracer, | |
5116 | NULL, &ftrace_pid_fops); | |
5117 | ||
5118 | ftrace_profile_debugfs(d_tracer); | |
5119 | ||
5120 | return 0; | |
5121 | } | |
5122 | fs_initcall(ftrace_init_debugfs); | |
5123 | ||
5124 | /** | |
5125 | * ftrace_kill - kill ftrace | |
5126 | * | |
5127 | * This function should be used by panic code. It stops ftrace | |
5128 | * but in a not so nice way. If you need to simply kill ftrace | |
5129 | * from a non-atomic section, use ftrace_kill. | |
5130 | */ | |
5131 | void ftrace_kill(void) | |
5132 | { | |
5133 | ftrace_disabled = 1; | |
5134 | ftrace_enabled = 0; | |
5135 | clear_ftrace_function(); | |
5136 | } | |
5137 | ||
5138 | /** | |
5139 | * Test if ftrace is dead or not. | |
5140 | */ | |
5141 | int ftrace_is_dead(void) | |
5142 | { | |
5143 | return ftrace_disabled; | |
5144 | } | |
5145 | ||
5146 | /** | |
5147 | * register_ftrace_function - register a function for profiling | |
5148 | * @ops - ops structure that holds the function for profiling. | |
5149 | * | |
5150 | * Register a function to be called by all functions in the | |
5151 | * kernel. | |
5152 | * | |
5153 | * Note: @ops->func and all the functions it calls must be labeled | |
5154 | * with "notrace", otherwise it will go into a | |
5155 | * recursive loop. | |
5156 | */ | |
5157 | int register_ftrace_function(struct ftrace_ops *ops) | |
5158 | { | |
5159 | int ret = -1; | |
5160 | ||
5161 | ftrace_ops_init(ops); | |
5162 | ||
5163 | mutex_lock(&ftrace_lock); | |
5164 | ||
5165 | ret = ftrace_startup(ops, 0); | |
5166 | ||
5167 | mutex_unlock(&ftrace_lock); | |
5168 | ||
5169 | return ret; | |
5170 | } | |
5171 | EXPORT_SYMBOL_GPL(register_ftrace_function); | |
5172 | ||
5173 | /** | |
5174 | * unregister_ftrace_function - unregister a function for profiling. | |
5175 | * @ops - ops structure that holds the function to unregister | |
5176 | * | |
5177 | * Unregister a function that was added to be called by ftrace profiling. | |
5178 | */ | |
5179 | int unregister_ftrace_function(struct ftrace_ops *ops) | |
5180 | { | |
5181 | int ret; | |
5182 | ||
5183 | mutex_lock(&ftrace_lock); | |
5184 | ret = ftrace_shutdown(ops, 0); | |
5185 | mutex_unlock(&ftrace_lock); | |
5186 | ||
5187 | return ret; | |
5188 | } | |
5189 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); | |
5190 | ||
5191 | int | |
5192 | ftrace_enable_sysctl(struct ctl_table *table, int write, | |
5193 | void __user *buffer, size_t *lenp, | |
5194 | loff_t *ppos) | |
5195 | { | |
5196 | int ret = -ENODEV; | |
5197 | ||
5198 | mutex_lock(&ftrace_lock); | |
5199 | ||
5200 | if (unlikely(ftrace_disabled)) | |
5201 | goto out; | |
5202 | ||
5203 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | |
5204 | ||
5205 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) | |
5206 | goto out; | |
5207 | ||
5208 | last_ftrace_enabled = !!ftrace_enabled; | |
5209 | ||
5210 | if (ftrace_enabled) { | |
5211 | ||
5212 | ftrace_startup_sysctl(); | |
5213 | ||
5214 | /* we are starting ftrace again */ | |
5215 | if (ftrace_ops_list != &ftrace_list_end) | |
5216 | update_ftrace_function(); | |
5217 | ||
5218 | } else { | |
5219 | /* stopping ftrace calls (just send to ftrace_stub) */ | |
5220 | ftrace_trace_function = ftrace_stub; | |
5221 | ||
5222 | ftrace_shutdown_sysctl(); | |
5223 | } | |
5224 | ||
5225 | out: | |
5226 | mutex_unlock(&ftrace_lock); | |
5227 | return ret; | |
5228 | } | |
5229 | ||
5230 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
5231 | ||
5232 | static struct ftrace_ops graph_ops = { | |
5233 | .func = ftrace_stub, | |
5234 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | |
5235 | FTRACE_OPS_FL_INITIALIZED | | |
5236 | FTRACE_OPS_FL_STUB, | |
5237 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | |
5238 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | |
5239 | #endif | |
5240 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | |
5241 | }; | |
5242 | ||
5243 | static int ftrace_graph_active; | |
5244 | ||
5245 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | |
5246 | { | |
5247 | return 0; | |
5248 | } | |
5249 | ||
5250 | /* The callbacks that hook a function */ | |
5251 | trace_func_graph_ret_t ftrace_graph_return = | |
5252 | (trace_func_graph_ret_t)ftrace_stub; | |
5253 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; | |
5254 | static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; | |
5255 | ||
5256 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | |
5257 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | |
5258 | { | |
5259 | int i; | |
5260 | int ret = 0; | |
5261 | unsigned long flags; | |
5262 | int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; | |
5263 | struct task_struct *g, *t; | |
5264 | ||
5265 | for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { | |
5266 | ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH | |
5267 | * sizeof(struct ftrace_ret_stack), | |
5268 | GFP_KERNEL); | |
5269 | if (!ret_stack_list[i]) { | |
5270 | start = 0; | |
5271 | end = i; | |
5272 | ret = -ENOMEM; | |
5273 | goto free; | |
5274 | } | |
5275 | } | |
5276 | ||
5277 | read_lock_irqsave(&tasklist_lock, flags); | |
5278 | do_each_thread(g, t) { | |
5279 | if (start == end) { | |
5280 | ret = -EAGAIN; | |
5281 | goto unlock; | |
5282 | } | |
5283 | ||
5284 | if (t->ret_stack == NULL) { | |
5285 | atomic_set(&t->tracing_graph_pause, 0); | |
5286 | atomic_set(&t->trace_overrun, 0); | |
5287 | t->curr_ret_stack = -1; | |
5288 | /* Make sure the tasks see the -1 first: */ | |
5289 | smp_wmb(); | |
5290 | t->ret_stack = ret_stack_list[start++]; | |
5291 | } | |
5292 | } while_each_thread(g, t); | |
5293 | ||
5294 | unlock: | |
5295 | read_unlock_irqrestore(&tasklist_lock, flags); | |
5296 | free: | |
5297 | for (i = start; i < end; i++) | |
5298 | kfree(ret_stack_list[i]); | |
5299 | return ret; | |
5300 | } | |
5301 | ||
5302 | static void | |
5303 | ftrace_graph_probe_sched_switch(void *ignore, | |
5304 | struct task_struct *prev, struct task_struct *next) | |
5305 | { | |
5306 | unsigned long long timestamp; | |
5307 | int index; | |
5308 | ||
5309 | /* | |
5310 | * Does the user want to count the time a function was asleep. | |
5311 | * If so, do not update the time stamps. | |
5312 | */ | |
5313 | if (trace_flags & TRACE_ITER_SLEEP_TIME) | |
5314 | return; | |
5315 | ||
5316 | timestamp = trace_clock_local(); | |
5317 | ||
5318 | prev->ftrace_timestamp = timestamp; | |
5319 | ||
5320 | /* only process tasks that we timestamped */ | |
5321 | if (!next->ftrace_timestamp) | |
5322 | return; | |
5323 | ||
5324 | /* | |
5325 | * Update all the counters in next to make up for the | |
5326 | * time next was sleeping. | |
5327 | */ | |
5328 | timestamp -= next->ftrace_timestamp; | |
5329 | ||
5330 | for (index = next->curr_ret_stack; index >= 0; index--) | |
5331 | next->ret_stack[index].calltime += timestamp; | |
5332 | } | |
5333 | ||
5334 | /* Allocate a return stack for each task */ | |
5335 | static int start_graph_tracing(void) | |
5336 | { | |
5337 | struct ftrace_ret_stack **ret_stack_list; | |
5338 | int ret, cpu; | |
5339 | ||
5340 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * | |
5341 | sizeof(struct ftrace_ret_stack *), | |
5342 | GFP_KERNEL); | |
5343 | ||
5344 | if (!ret_stack_list) | |
5345 | return -ENOMEM; | |
5346 | ||
5347 | /* The cpu_boot init_task->ret_stack will never be freed */ | |
5348 | for_each_online_cpu(cpu) { | |
5349 | if (!idle_task(cpu)->ret_stack) | |
5350 | ftrace_graph_init_idle_task(idle_task(cpu), cpu); | |
5351 | } | |
5352 | ||
5353 | do { | |
5354 | ret = alloc_retstack_tasklist(ret_stack_list); | |
5355 | } while (ret == -EAGAIN); | |
5356 | ||
5357 | if (!ret) { | |
5358 | ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | |
5359 | if (ret) | |
5360 | pr_info("ftrace_graph: Couldn't activate tracepoint" | |
5361 | " probe to kernel_sched_switch\n"); | |
5362 | } | |
5363 | ||
5364 | kfree(ret_stack_list); | |
5365 | return ret; | |
5366 | } | |
5367 | ||
5368 | /* | |
5369 | * Hibernation protection. | |
5370 | * The state of the current task is too much unstable during | |
5371 | * suspend/restore to disk. We want to protect against that. | |
5372 | */ | |
5373 | static int | |
5374 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | |
5375 | void *unused) | |
5376 | { | |
5377 | switch (state) { | |
5378 | case PM_HIBERNATION_PREPARE: | |
5379 | pause_graph_tracing(); | |
5380 | break; | |
5381 | ||
5382 | case PM_POST_HIBERNATION: | |
5383 | unpause_graph_tracing(); | |
5384 | break; | |
5385 | } | |
5386 | return NOTIFY_DONE; | |
5387 | } | |
5388 | ||
5389 | static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) | |
5390 | { | |
5391 | if (!ftrace_ops_test(&global_ops, trace->func, NULL)) | |
5392 | return 0; | |
5393 | return __ftrace_graph_entry(trace); | |
5394 | } | |
5395 | ||
5396 | /* | |
5397 | * The function graph tracer should only trace the functions defined | |
5398 | * by set_ftrace_filter and set_ftrace_notrace. If another function | |
5399 | * tracer ops is registered, the graph tracer requires testing the | |
5400 | * function against the global ops, and not just trace any function | |
5401 | * that any ftrace_ops registered. | |
5402 | */ | |
5403 | static void update_function_graph_func(void) | |
5404 | { | |
5405 | struct ftrace_ops *op; | |
5406 | bool do_test = false; | |
5407 | ||
5408 | /* | |
5409 | * The graph and global ops share the same set of functions | |
5410 | * to test. If any other ops is on the list, then | |
5411 | * the graph tracing needs to test if its the function | |
5412 | * it should call. | |
5413 | */ | |
5414 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
5415 | if (op != &global_ops && op != &graph_ops && | |
5416 | op != &ftrace_list_end) { | |
5417 | do_test = true; | |
5418 | /* in double loop, break out with goto */ | |
5419 | goto out; | |
5420 | } | |
5421 | } while_for_each_ftrace_op(op); | |
5422 | out: | |
5423 | if (do_test) | |
5424 | ftrace_graph_entry = ftrace_graph_entry_test; | |
5425 | else | |
5426 | ftrace_graph_entry = __ftrace_graph_entry; | |
5427 | } | |
5428 | ||
5429 | static struct notifier_block ftrace_suspend_notifier = { | |
5430 | .notifier_call = ftrace_suspend_notifier_call, | |
5431 | }; | |
5432 | ||
5433 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |
5434 | trace_func_graph_ent_t entryfunc) | |
5435 | { | |
5436 | int ret = 0; | |
5437 | ||
5438 | mutex_lock(&ftrace_lock); | |
5439 | ||
5440 | /* we currently allow only one tracer registered at a time */ | |
5441 | if (ftrace_graph_active) { | |
5442 | ret = -EBUSY; | |
5443 | goto out; | |
5444 | } | |
5445 | ||
5446 | register_pm_notifier(&ftrace_suspend_notifier); | |
5447 | ||
5448 | ftrace_graph_active++; | |
5449 | ret = start_graph_tracing(); | |
5450 | if (ret) { | |
5451 | ftrace_graph_active--; | |
5452 | goto out; | |
5453 | } | |
5454 | ||
5455 | ftrace_graph_return = retfunc; | |
5456 | ||
5457 | /* | |
5458 | * Update the indirect function to the entryfunc, and the | |
5459 | * function that gets called to the entry_test first. Then | |
5460 | * call the update fgraph entry function to determine if | |
5461 | * the entryfunc should be called directly or not. | |
5462 | */ | |
5463 | __ftrace_graph_entry = entryfunc; | |
5464 | ftrace_graph_entry = ftrace_graph_entry_test; | |
5465 | update_function_graph_func(); | |
5466 | ||
5467 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); | |
5468 | ||
5469 | out: | |
5470 | mutex_unlock(&ftrace_lock); | |
5471 | return ret; | |
5472 | } | |
5473 | ||
5474 | void unregister_ftrace_graph(void) | |
5475 | { | |
5476 | mutex_lock(&ftrace_lock); | |
5477 | ||
5478 | if (unlikely(!ftrace_graph_active)) | |
5479 | goto out; | |
5480 | ||
5481 | ftrace_graph_active--; | |
5482 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | |
5483 | ftrace_graph_entry = ftrace_graph_entry_stub; | |
5484 | __ftrace_graph_entry = ftrace_graph_entry_stub; | |
5485 | ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); | |
5486 | unregister_pm_notifier(&ftrace_suspend_notifier); | |
5487 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | |
5488 | ||
5489 | out: | |
5490 | mutex_unlock(&ftrace_lock); | |
5491 | } | |
5492 | ||
5493 | static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); | |
5494 | ||
5495 | static void | |
5496 | graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) | |
5497 | { | |
5498 | atomic_set(&t->tracing_graph_pause, 0); | |
5499 | atomic_set(&t->trace_overrun, 0); | |
5500 | t->ftrace_timestamp = 0; | |
5501 | /* make curr_ret_stack visible before we add the ret_stack */ | |
5502 | smp_wmb(); | |
5503 | t->ret_stack = ret_stack; | |
5504 | } | |
5505 | ||
5506 | /* | |
5507 | * Allocate a return stack for the idle task. May be the first | |
5508 | * time through, or it may be done by CPU hotplug online. | |
5509 | */ | |
5510 | void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) | |
5511 | { | |
5512 | t->curr_ret_stack = -1; | |
5513 | /* | |
5514 | * The idle task has no parent, it either has its own | |
5515 | * stack or no stack at all. | |
5516 | */ | |
5517 | if (t->ret_stack) | |
5518 | WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); | |
5519 | ||
5520 | if (ftrace_graph_active) { | |
5521 | struct ftrace_ret_stack *ret_stack; | |
5522 | ||
5523 | ret_stack = per_cpu(idle_ret_stack, cpu); | |
5524 | if (!ret_stack) { | |
5525 | ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | |
5526 | * sizeof(struct ftrace_ret_stack), | |
5527 | GFP_KERNEL); | |
5528 | if (!ret_stack) | |
5529 | return; | |
5530 | per_cpu(idle_ret_stack, cpu) = ret_stack; | |
5531 | } | |
5532 | graph_init_task(t, ret_stack); | |
5533 | } | |
5534 | } | |
5535 | ||
5536 | /* Allocate a return stack for newly created task */ | |
5537 | void ftrace_graph_init_task(struct task_struct *t) | |
5538 | { | |
5539 | /* Make sure we do not use the parent ret_stack */ | |
5540 | t->ret_stack = NULL; | |
5541 | t->curr_ret_stack = -1; | |
5542 | ||
5543 | if (ftrace_graph_active) { | |
5544 | struct ftrace_ret_stack *ret_stack; | |
5545 | ||
5546 | ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | |
5547 | * sizeof(struct ftrace_ret_stack), | |
5548 | GFP_KERNEL); | |
5549 | if (!ret_stack) | |
5550 | return; | |
5551 | graph_init_task(t, ret_stack); | |
5552 | } | |
5553 | } | |
5554 | ||
5555 | void ftrace_graph_exit_task(struct task_struct *t) | |
5556 | { | |
5557 | struct ftrace_ret_stack *ret_stack = t->ret_stack; | |
5558 | ||
5559 | t->ret_stack = NULL; | |
5560 | /* NULL must become visible to IRQs before we free it: */ | |
5561 | barrier(); | |
5562 | ||
5563 | kfree(ret_stack); | |
5564 | } | |
5565 | #endif |