16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 13 * Copyright (C) 2004 Nadia Yvette Chambers
16444a8a
ACM
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
5855fead 25#include <linux/bsearch.h>
56d82e00 26#include <linux/module.h>
2d8b820b 27#include <linux/ftrace.h>
b0fc494f 28#include <linux/sysctl.h>
5a0e3ad6 29#include <linux/slab.h>
5072c59f 30#include <linux/ctype.h>
68950619 31#include <linux/sort.h>
3d083395 32#include <linux/list.h>
59df055f 33#include <linux/hash.h>
3f379b03 34#include <linux/rcupdate.h>
3d083395 35
ad8d75ff 36#include <trace/events/sched.h>
8aef2d28 37
2af15d6a 38#include <asm/setup.h>
395a59d0 39
0706f1c4 40#include "trace_output.h"
bac429f0 41#include "trace_stat.h"
16444a8a 42
6912896e 43#define FTRACE_WARN_ON(cond) \
0778d9ad
SR
44 ({ \
45 int ___r = cond; \
46 if (WARN_ON(___r)) \
6912896e 47 ftrace_kill(); \
0778d9ad
SR
48 ___r; \
49 })
6912896e
SR
50
51#define FTRACE_WARN_ON_ONCE(cond) \
0778d9ad
SR
52 ({ \
53 int ___r = cond; \
54 if (WARN_ON_ONCE(___r)) \
6912896e 55 ftrace_kill(); \
0778d9ad
SR
56 ___r; \
57 })
6912896e 58
8fc0c701
SR
59/* hash bits for specific function selection */
60#define FTRACE_HASH_BITS 7
61#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
33dc9b12
SR
62#define FTRACE_HASH_DEFAULT_BITS 10
63#define FTRACE_HASH_MAX_BITS 12
8fc0c701 64
e248491a
JO
65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
f04f24fb
MH
67#ifdef CONFIG_DYNAMIC_FTRACE
68#define INIT_REGEX_LOCK(opsname) \
69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
70#else
71#define INIT_REGEX_LOCK(opsname)
72#endif
73
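/*
 * Usage sketch (placeholder names, not part of this file): the macro is
 * meant to sit inside a static ftrace_ops initializer, as done for
 * global_ops and ftrace_profile_ops further below:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
 *		INIT_REGEX_LOCK(my_ops)
 *	};
 */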
2f5f6ad9
SR
74static struct ftrace_ops ftrace_list_end __read_mostly = {
75 .func = ftrace_stub,
395b97a3 76 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
2f5f6ad9
SR
77};
78
4eebcc81
SR
79/* ftrace_enabled is a method to turn ftrace on or off */
80int ftrace_enabled __read_mostly;
d61f82d0 81static int last_ftrace_enabled;
b0fc494f 82
60a7ecf4 83/* Quick disabling of function tracer. */
2f5f6ad9
SR
84int function_trace_stop __read_mostly;
85
86/* Current function tracing op */
87struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
405e1d83
SRRH
88/* What to set function_trace_op to */
89static struct ftrace_ops *set_function_trace_op;
60a7ecf4 90
756d17ee 91/* List for set_ftrace_pid's pids. */
92LIST_HEAD(ftrace_pids);
93struct ftrace_pid {
94 struct list_head list;
95 struct pid *pid;
96};
97
4eebcc81
SR
98/*
99 * ftrace_disabled is set when an anomaly is discovered.
100 * ftrace_disabled is much stronger than ftrace_enabled.
101 */
102static int ftrace_disabled __read_mostly;
103
52baf119 104static DEFINE_MUTEX(ftrace_lock);
b0fc494f 105
b848914c 106static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
e248491a 107static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
b848914c 108static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
16444a8a 109ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 110ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
2b499381 111static struct ftrace_ops global_ops;
e248491a 112static struct ftrace_ops control_ops;
16444a8a 113
2f5f6ad9
SR
114#if ARCH_SUPPORTS_FTRACE_OPS
115static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 116 struct ftrace_ops *op, struct pt_regs *regs);
2f5f6ad9
SR
117#else
118/* See comment below, where ftrace_ops_list_func is defined */
119static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
120#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
121#endif
b848914c 122
0a016409
SR
123/*
124 * Traverse the ftrace_global_list, invoking all entries. The reason that we
1bb539ca 125 * can use rcu_dereference_raw_notrace() is that elements removed from this list
0a016409 126 * are simply leaked, so there is no need to interact with a grace-period
1bb539ca 127 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
0a016409
SR
128 * concurrent insertions into the ftrace_global_list.
129 *
130 * Silly Alpha and silly pointer-speculation compiler optimizations!
131 */
132#define do_for_each_ftrace_op(op, list) \
1bb539ca 133 op = rcu_dereference_raw_notrace(list); \
0a016409
SR
134 do
135
136/*
137 * Optimized for just a single item in the list (as that is the normal case).
138 */
139#define while_for_each_ftrace_op(op) \
1bb539ca 140 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
0a016409
SR
141 unlikely((op) != &ftrace_list_end))
142
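/*
 * Usage sketch (mirrors ftrace_global_list_func() below):
 *
 *	do_for_each_ftrace_op(op, ftrace_global_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */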
f04f24fb
MH
143static inline void ftrace_ops_init(struct ftrace_ops *ops)
144{
145#ifdef CONFIG_DYNAMIC_FTRACE
146 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
147 mutex_init(&ops->regex_lock);
148 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
149 }
150#endif
151}
152
ea701f11
SR
153/**
154 * ftrace_nr_registered_ops - return number of ops registered
155 *
156 * Returns the number of ftrace_ops registered and tracing functions
157 */
158int ftrace_nr_registered_ops(void)
159{
160 struct ftrace_ops *ops;
161 int cnt = 0;
162
163 mutex_lock(&ftrace_lock);
164
165 for (ops = ftrace_ops_list;
166 ops != &ftrace_list_end; ops = ops->next)
167 cnt++;
168
169 mutex_unlock(&ftrace_lock);
170
171 return cnt;
172}
173
2f5f6ad9
SR
174static void
175ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 176 struct ftrace_ops *op, struct pt_regs *regs)
16444a8a 177{
c29f122c
SR
178 int bit;
179
edc15caf
SR
180 bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
181 if (bit < 0)
b1cff0ad 182 return;
16444a8a 183
0a016409 184 do_for_each_ftrace_op(op, ftrace_global_list) {
a1e2e31d 185 op->func(ip, parent_ip, op, regs);
0a016409 186 } while_for_each_ftrace_op(op);
edc15caf
SR
187
188 trace_clear_recursion(bit);
16444a8a
ACM
189}
190
2f5f6ad9 191static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 192 struct ftrace_ops *op, struct pt_regs *regs)
df4fc315 193{
0ef8cde5 194 if (!test_tsk_trace_trace(current))
df4fc315
SR
195 return;
196
a1e2e31d 197 ftrace_pid_function(ip, parent_ip, op, regs);
df4fc315
SR
198}
199
200static void set_ftrace_pid_function(ftrace_func_t func)
201{
202 /* do not set ftrace_pid_function to itself! */
203 if (func != ftrace_pid_func)
204 ftrace_pid_function = func;
205}
206
16444a8a 207/**
3d083395 208 * clear_ftrace_function - reset the ftrace function
16444a8a 209 *
3d083395
SR
210 * This NULLs the ftrace function and in essence stops
 211 * tracing. There may be lag before other CPUs see the change.
16444a8a 212 */
3d083395 213void clear_ftrace_function(void)
16444a8a 214{
3d083395 215 ftrace_trace_function = ftrace_stub;
df4fc315 216 ftrace_pid_function = ftrace_stub;
3d083395
SR
217}
218
e248491a
JO
219static void control_ops_disable_all(struct ftrace_ops *ops)
220{
221 int cpu;
222
223 for_each_possible_cpu(cpu)
224 *per_cpu_ptr(ops->disabled, cpu) = 1;
225}
226
227static int control_ops_alloc(struct ftrace_ops *ops)
228{
229 int __percpu *disabled;
230
231 disabled = alloc_percpu(int);
232 if (!disabled)
233 return -ENOMEM;
234
235 ops->disabled = disabled;
236 control_ops_disable_all(ops);
237 return 0;
238}
239
240static void control_ops_free(struct ftrace_ops *ops)
241{
242 free_percpu(ops->disabled);
243}
244
2b499381 245static void update_global_ops(void)
491d0dcf 246{
e6435e96
SRRH
247 ftrace_func_t func = ftrace_global_list_func;
248 void *private = NULL;
249
250 /* The list has its own recursion protection. */
251 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
491d0dcf
SR
252
253 /*
254 * If there's only one function registered, then call that
255 * function directly. Otherwise, we need to iterate over the
256 * registered callers.
257 */
b848914c 258 if (ftrace_global_list == &ftrace_list_end ||
63503794 259 ftrace_global_list->next == &ftrace_list_end) {
b848914c 260 func = ftrace_global_list->func;
e6435e96 261 private = ftrace_global_list->private;
63503794
SR
262 /*
 263 * As we are calling the function directly,
 264 * if it does not have recursion protection,
265 * the function_trace_op needs to be updated
266 * accordingly.
267 */
e6435e96 268 if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
63503794 269 global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
63503794
SR
270 }
271
491d0dcf
SR
272 /* If we filter on pids, update to use the pid function */
273 if (!list_empty(&ftrace_pids)) {
274 set_ftrace_pid_function(func);
275 func = ftrace_pid_func;
276 }
2b499381
SR
277
278 global_ops.func = func;
e6435e96 279 global_ops.private = private;
2b499381
SR
280}
281
405e1d83
SRRH
282static void ftrace_sync(struct work_struct *work)
283{
284 /*
285 * This function is just a stub to implement a hard force
286 * of synchronize_sched(). This requires synchronizing
287 * tasks even in userspace and idle.
288 *
289 * Yes, function tracing is rude.
290 */
291}
292
293static void ftrace_sync_ipi(void *data)
294{
295 /* Probably not needed, but do it anyway */
296 smp_rmb();
297}
298
23a8e844
SRRH
299#ifdef CONFIG_FUNCTION_GRAPH_TRACER
300static void update_function_graph_func(void);
301#else
302static inline void update_function_graph_func(void) { }
303#endif
304
2b499381
SR
305static void update_ftrace_function(void)
306{
307 ftrace_func_t func;
308
309 update_global_ops();
310
cdbe61bf
SR
311 /*
312 * If we are at the end of the list and this ops is
4740974a
SR
313 * recursion safe and not dynamic and the arch supports passing ops,
314 * then have the mcount trampoline call the function directly.
cdbe61bf 315 */
b848914c 316 if (ftrace_ops_list == &ftrace_list_end ||
cdbe61bf 317 (ftrace_ops_list->next == &ftrace_list_end &&
2f5f6ad9 318 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
4740974a 319 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
ccf3672d 320 !FTRACE_FORCE_LIST_FUNC)) {
2f5f6ad9
SR
321 /* Set the ftrace_ops that the arch callback uses */
322 if (ftrace_ops_list == &global_ops)
405e1d83 323 set_function_trace_op = ftrace_global_list;
2f5f6ad9 324 else
405e1d83 325 set_function_trace_op = ftrace_ops_list;
b848914c 326 func = ftrace_ops_list->func;
2f5f6ad9
SR
327 } else {
328 /* Just use the default ftrace_ops */
405e1d83 329 set_function_trace_op = &ftrace_list_end;
b848914c 330 func = ftrace_ops_list_func;
2f5f6ad9 331 }
2b499381 332
405e1d83
SRRH
333 /* If there's no change, then do nothing more here */
334 if (ftrace_trace_function == func)
335 return;
336
23a8e844
SRRH
337 update_function_graph_func();
338
405e1d83
SRRH
339 /*
340 * If we are using the list function, it doesn't care
341 * about the function_trace_ops.
342 */
343 if (func == ftrace_ops_list_func) {
344 ftrace_trace_function = func;
345 /*
346 * Don't even bother setting function_trace_ops,
347 * it would be racy to do so anyway.
348 */
349 return;
350 }
351
352#ifndef CONFIG_DYNAMIC_FTRACE
353 /*
354 * For static tracing, we need to be a bit more careful.
 355 * The function change takes effect immediately. Thus,
 356 * we need to coordinate the setting of the function_trace_ops
357 * with the setting of the ftrace_trace_function.
358 *
359 * Set the function to the list ops, which will call the
360 * function we want, albeit indirectly, but it handles the
361 * ftrace_ops and doesn't depend on function_trace_op.
362 */
363 ftrace_trace_function = ftrace_ops_list_func;
364 /*
365 * Make sure all CPUs see this. Yes this is slow, but static
366 * tracing is slow and nasty to have enabled.
367 */
368 schedule_on_each_cpu(ftrace_sync);
369 /* Now all cpus are using the list ops. */
370 function_trace_op = set_function_trace_op;
371 /* Make sure the function_trace_op is visible on all CPUs */
372 smp_wmb();
373 /* Nasty way to force a rmb on all cpus */
374 smp_call_function(ftrace_sync_ipi, NULL, 1);
375 /* OK, we are all set to update the ftrace_trace_function now! */
376#endif /* !CONFIG_DYNAMIC_FTRACE */
377
491d0dcf 378 ftrace_trace_function = func;
491d0dcf
SR
379}
380
2b499381 381static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
3d083395 382{
2b499381 383 ops->next = *list;
16444a8a 384 /*
b848914c 385 * We are entering ops into the list but another
16444a8a
ACM
386 * CPU might be walking that list. We need to make sure
387 * the ops->next pointer is valid before another CPU sees
b848914c 388 * the ops pointer included into the list.
16444a8a 389 */
2b499381 390 rcu_assign_pointer(*list, ops);
16444a8a
ACM
391}
392
2b499381 393static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
16444a8a 394{
16444a8a 395 struct ftrace_ops **p;
16444a8a
ACM
396
397 /*
3d083395
SR
398 * If we are removing the last function, then simply point
399 * to the ftrace_stub.
16444a8a 400 */
2b499381
SR
401 if (*list == ops && ops->next == &ftrace_list_end) {
402 *list = &ftrace_list_end;
e6ea44e9 403 return 0;
16444a8a
ACM
404 }
405
2b499381 406 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
16444a8a
ACM
407 if (*p == ops)
408 break;
409
e6ea44e9
SR
410 if (*p != ops)
411 return -1;
16444a8a
ACM
412
413 *p = (*p)->next;
2b499381
SR
414 return 0;
415}
16444a8a 416
e248491a
JO
417static void add_ftrace_list_ops(struct ftrace_ops **list,
418 struct ftrace_ops *main_ops,
419 struct ftrace_ops *ops)
420{
421 int first = *list == &ftrace_list_end;
422 add_ftrace_ops(list, ops);
423 if (first)
424 add_ftrace_ops(&ftrace_ops_list, main_ops);
425}
426
427static int remove_ftrace_list_ops(struct ftrace_ops **list,
428 struct ftrace_ops *main_ops,
429 struct ftrace_ops *ops)
430{
431 int ret = remove_ftrace_ops(list, ops);
432 if (!ret && *list == &ftrace_list_end)
433 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
434 return ret;
435}
436
2b499381
SR
437static int __register_ftrace_function(struct ftrace_ops *ops)
438{
591dffda
SRRH
439 if (ops->flags & FTRACE_OPS_FL_DELETED)
440 return -EINVAL;
441
2b499381
SR
442 if (FTRACE_WARN_ON(ops == &global_ops))
443 return -EINVAL;
444
b848914c
SR
445 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
446 return -EBUSY;
447
e248491a
JO
448 /* We don't support both control and global flags set. */
449 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
450 return -EINVAL;
451
06aeaaea 452#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
08f6fba5
SR
453 /*
454 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
455 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
456 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
457 */
458 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
459 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
460 return -EINVAL;
461
462 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
463 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
464#endif
465
cdbe61bf
SR
466 if (!core_kernel_data((unsigned long)ops))
467 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
468
b848914c 469 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
e248491a 470 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
b848914c 471 ops->flags |= FTRACE_OPS_FL_ENABLED;
e248491a
JO
472 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
473 if (control_ops_alloc(ops))
474 return -ENOMEM;
475 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
b848914c
SR
476 } else
477 add_ftrace_ops(&ftrace_ops_list, ops);
478
2b499381
SR
479 if (ftrace_enabled)
480 update_ftrace_function();
481
482 return 0;
483}
484
485static int __unregister_ftrace_function(struct ftrace_ops *ops)
486{
487 int ret;
488
b848914c
SR
489 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
490 return -EBUSY;
491
2b499381
SR
492 if (FTRACE_WARN_ON(ops == &global_ops))
493 return -EINVAL;
494
b848914c 495 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
e248491a
JO
496 ret = remove_ftrace_list_ops(&ftrace_global_list,
497 &global_ops, ops);
b848914c
SR
498 if (!ret)
499 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
e248491a
JO
500 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
501 ret = remove_ftrace_list_ops(&ftrace_control_list,
502 &control_ops, ops);
b848914c
SR
503 } else
504 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
505
2b499381
SR
506 if (ret < 0)
507 return ret;
b848914c 508
491d0dcf
SR
509 if (ftrace_enabled)
510 update_ftrace_function();
16444a8a 511
e6ea44e9 512 return 0;
3d083395
SR
513}
514
df4fc315
SR
515static void ftrace_update_pid_func(void)
516{
491d0dcf 517 /* Only do something if we are tracing something */
df4fc315 518 if (ftrace_trace_function == ftrace_stub)
10dd3ebe 519 return;
df4fc315 520
491d0dcf 521 update_ftrace_function();
df4fc315
SR
522}
523
493762fc
SR
524#ifdef CONFIG_FUNCTION_PROFILER
525struct ftrace_profile {
526 struct hlist_node node;
527 unsigned long ip;
528 unsigned long counter;
0706f1c4
SR
529#ifdef CONFIG_FUNCTION_GRAPH_TRACER
530 unsigned long long time;
e330b3bc 531 unsigned long long time_squared;
0706f1c4 532#endif
8fc0c701
SR
533};
534
493762fc
SR
535struct ftrace_profile_page {
536 struct ftrace_profile_page *next;
537 unsigned long index;
538 struct ftrace_profile records[];
d61f82d0
SR
539};
540
cafb168a
SR
541struct ftrace_profile_stat {
542 atomic_t disabled;
543 struct hlist_head *hash;
544 struct ftrace_profile_page *pages;
545 struct ftrace_profile_page *start;
546 struct tracer_stat stat;
547};
548
493762fc
SR
549#define PROFILE_RECORDS_SIZE \
550 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
5072c59f 551
493762fc
SR
552#define PROFILES_PER_PAGE \
553 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
3d083395 554
fb9fb015
SR
555static int ftrace_profile_enabled __read_mostly;
556
557/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
bac429f0
SR
558static DEFINE_MUTEX(ftrace_profile_lock);
559
cafb168a 560static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
493762fc 561
20079ebe
NK
562#define FTRACE_PROFILE_HASH_BITS 10
563#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
493762fc 564
bac429f0
SR
565static void *
566function_stat_next(void *v, int idx)
567{
493762fc
SR
568 struct ftrace_profile *rec = v;
569 struct ftrace_profile_page *pg;
bac429f0 570
493762fc 571 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
bac429f0
SR
572
573 again:
0296e425
LZ
574 if (idx != 0)
575 rec++;
576
bac429f0
SR
577 if ((void *)rec >= (void *)&pg->records[pg->index]) {
578 pg = pg->next;
579 if (!pg)
580 return NULL;
581 rec = &pg->records[0];
493762fc
SR
582 if (!rec->counter)
583 goto again;
bac429f0
SR
584 }
585
bac429f0
SR
586 return rec;
587}
588
589static void *function_stat_start(struct tracer_stat *trace)
590{
cafb168a
SR
591 struct ftrace_profile_stat *stat =
592 container_of(trace, struct ftrace_profile_stat, stat);
593
594 if (!stat || !stat->start)
595 return NULL;
596
597 return function_stat_next(&stat->start->records[0], 0);
bac429f0
SR
598}
599
0706f1c4
SR
600#ifdef CONFIG_FUNCTION_GRAPH_TRACER
601/* function graph compares on total time */
602static int function_stat_cmp(void *p1, void *p2)
603{
604 struct ftrace_profile *a = p1;
605 struct ftrace_profile *b = p2;
606
607 if (a->time < b->time)
608 return -1;
609 if (a->time > b->time)
610 return 1;
611 else
612 return 0;
613}
614#else
615/* not function graph compares against hits */
bac429f0
SR
616static int function_stat_cmp(void *p1, void *p2)
617{
493762fc
SR
618 struct ftrace_profile *a = p1;
619 struct ftrace_profile *b = p2;
bac429f0
SR
620
621 if (a->counter < b->counter)
622 return -1;
623 if (a->counter > b->counter)
624 return 1;
625 else
626 return 0;
627}
0706f1c4 628#endif
bac429f0
SR
629
630static int function_stat_headers(struct seq_file *m)
631{
0706f1c4 632#ifdef CONFIG_FUNCTION_GRAPH_TRACER
34886c8b 633 seq_printf(m, " Function "
e330b3bc 634 "Hit Time Avg s^2\n"
34886c8b 635 " -------- "
e330b3bc 636 "--- ---- --- ---\n");
0706f1c4 637#else
bac429f0
SR
638 seq_printf(m, " Function Hit\n"
639 " -------- ---\n");
0706f1c4 640#endif
bac429f0
SR
641 return 0;
642}
643
644static int function_stat_show(struct seq_file *m, void *v)
645{
493762fc 646 struct ftrace_profile *rec = v;
bac429f0 647 char str[KSYM_SYMBOL_LEN];
3aaba20f 648 int ret = 0;
0706f1c4 649#ifdef CONFIG_FUNCTION_GRAPH_TRACER
34886c8b
SR
650 static struct trace_seq s;
651 unsigned long long avg;
e330b3bc 652 unsigned long long stddev;
0706f1c4 653#endif
3aaba20f
LZ
654 mutex_lock(&ftrace_profile_lock);
655
656 /* we raced with function_profile_reset() */
657 if (unlikely(rec->counter == 0)) {
658 ret = -EBUSY;
659 goto out;
660 }
bac429f0
SR
661
662 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
0706f1c4
SR
663 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
664
665#ifdef CONFIG_FUNCTION_GRAPH_TRACER
666 seq_printf(m, " ");
34886c8b
SR
667 avg = rec->time;
668 do_div(avg, rec->counter);
669
e330b3bc
CD
670 /* Sample standard deviation (s^2) */
671 if (rec->counter <= 1)
672 stddev = 0;
673 else {
52d85d76
JL
674 /*
675 * Apply Welford's method:
676 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
677 */
678 stddev = rec->counter * rec->time_squared -
679 rec->time * rec->time;
680
e330b3bc
CD
681 /*
 682 * Divide only by 1000 for the ns^2 -> us^2 conversion.
683 * trace_print_graph_duration will divide 1000 again.
684 */
52d85d76 685 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
e330b3bc
CD
686 }
687
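/*
 * Worked example (illustrative numbers only): two samples of 3000ns
 * and 5000ns give rec->counter = 2, rec->time = 8000 and
 * rec->time_squared = 34,000,000, so stddev = 2 * 34e6 - 8000^2
 * = 4,000,000.  Dividing by counter * (counter - 1) * 1000 = 2000
 * yields 2000, and the extra /1000 in trace_print_graph_duration()
 * prints 2, i.e. 2 us^2.
 */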
34886c8b
SR
688 trace_seq_init(&s);
689 trace_print_graph_duration(rec->time, &s);
690 trace_seq_puts(&s, " ");
691 trace_print_graph_duration(avg, &s);
e330b3bc
CD
692 trace_seq_puts(&s, " ");
693 trace_print_graph_duration(stddev, &s);
0706f1c4 694 trace_print_seq(m, &s);
0706f1c4
SR
695#endif
696 seq_putc(m, '\n');
3aaba20f
LZ
697out:
698 mutex_unlock(&ftrace_profile_lock);
bac429f0 699
3aaba20f 700 return ret;
bac429f0
SR
701}
702
cafb168a 703static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
bac429f0 704{
493762fc 705 struct ftrace_profile_page *pg;
bac429f0 706
cafb168a 707 pg = stat->pages = stat->start;
bac429f0 708
493762fc
SR
709 while (pg) {
710 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
711 pg->index = 0;
712 pg = pg->next;
bac429f0
SR
713 }
714
cafb168a 715 memset(stat->hash, 0,
493762fc
SR
716 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
717}
bac429f0 718
cafb168a 719int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
493762fc
SR
720{
721 struct ftrace_profile_page *pg;
318e0a73
SR
722 int functions;
723 int pages;
493762fc 724 int i;
bac429f0 725
493762fc 726 /* If we already allocated, do nothing */
cafb168a 727 if (stat->pages)
493762fc 728 return 0;
bac429f0 729
cafb168a
SR
730 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
731 if (!stat->pages)
493762fc 732 return -ENOMEM;
bac429f0 733
318e0a73
SR
734#ifdef CONFIG_DYNAMIC_FTRACE
735 functions = ftrace_update_tot_cnt;
736#else
737 /*
738 * We do not know the number of functions that exist because
739 * dynamic tracing is what counts them. With past experience
740 * we have around 20K functions. That should be more than enough.
741 * It is highly unlikely we will execute every function in
742 * the kernel.
743 */
744 functions = 20000;
745#endif
746
cafb168a 747 pg = stat->start = stat->pages;
bac429f0 748
318e0a73
SR
749 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
750
39e30cd1 751 for (i = 1; i < pages; i++) {
493762fc 752 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
493762fc 753 if (!pg->next)
318e0a73 754 goto out_free;
493762fc
SR
755 pg = pg->next;
756 }
757
758 return 0;
318e0a73
SR
759
760 out_free:
761 pg = stat->start;
762 while (pg) {
763 unsigned long tmp = (unsigned long)pg;
764
765 pg = pg->next;
766 free_page(tmp);
767 }
768
318e0a73
SR
769 stat->pages = NULL;
770 stat->start = NULL;
771
772 return -ENOMEM;
bac429f0
SR
773}
774
cafb168a 775static int ftrace_profile_init_cpu(int cpu)
bac429f0 776{
cafb168a 777 struct ftrace_profile_stat *stat;
493762fc 778 int size;
bac429f0 779
cafb168a
SR
780 stat = &per_cpu(ftrace_profile_stats, cpu);
781
782 if (stat->hash) {
493762fc 783 /* If the profile is already created, simply reset it */
cafb168a 784 ftrace_profile_reset(stat);
493762fc
SR
785 return 0;
786 }
bac429f0 787
493762fc
SR
788 /*
789 * We are profiling all functions, but usually only a few thousand
790 * functions are hit. We'll make a hash of 1024 items.
791 */
792 size = FTRACE_PROFILE_HASH_SIZE;
bac429f0 793
cafb168a 794 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
493762fc 795
cafb168a 796 if (!stat->hash)
493762fc
SR
797 return -ENOMEM;
798
318e0a73 799 /* Preallocate the function profiling pages */
cafb168a
SR
800 if (ftrace_profile_pages_init(stat) < 0) {
801 kfree(stat->hash);
802 stat->hash = NULL;
493762fc
SR
803 return -ENOMEM;
804 }
805
806 return 0;
bac429f0
SR
807}
808
cafb168a
SR
809static int ftrace_profile_init(void)
810{
811 int cpu;
812 int ret = 0;
813
c4602c1c 814 for_each_possible_cpu(cpu) {
cafb168a
SR
815 ret = ftrace_profile_init_cpu(cpu);
816 if (ret)
817 break;
818 }
819
820 return ret;
821}
822
493762fc 823/* interrupts must be disabled */
cafb168a
SR
824static struct ftrace_profile *
825ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
bac429f0 826{
493762fc 827 struct ftrace_profile *rec;
bac429f0 828 struct hlist_head *hhd;
bac429f0
SR
829 unsigned long key;
830
20079ebe 831 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
cafb168a 832 hhd = &stat->hash[key];
bac429f0
SR
833
834 if (hlist_empty(hhd))
835 return NULL;
836
1bb539ca 837 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
bac429f0 838 if (rec->ip == ip)
493762fc
SR
839 return rec;
840 }
841
842 return NULL;
843}
844
cafb168a
SR
845static void ftrace_add_profile(struct ftrace_profile_stat *stat,
846 struct ftrace_profile *rec)
493762fc
SR
847{
848 unsigned long key;
849
20079ebe 850 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
cafb168a 851 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
493762fc
SR
852}
853
318e0a73
SR
854/*
 855 * The memory is already allocated; this simply finds a new record to use.
856 */
493762fc 857static struct ftrace_profile *
318e0a73 858ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
493762fc
SR
859{
860 struct ftrace_profile *rec = NULL;
861
318e0a73 862 /* prevent recursion (from NMIs) */
cafb168a 863 if (atomic_inc_return(&stat->disabled) != 1)
493762fc
SR
864 goto out;
865
493762fc 866 /*
318e0a73
SR
867 * Try to find the function again since an NMI
868 * could have added it
493762fc 869 */
cafb168a 870 rec = ftrace_find_profiled_func(stat, ip);
493762fc 871 if (rec)
cafb168a 872 goto out;
493762fc 873
cafb168a
SR
874 if (stat->pages->index == PROFILES_PER_PAGE) {
875 if (!stat->pages->next)
876 goto out;
877 stat->pages = stat->pages->next;
bac429f0 878 }
493762fc 879
cafb168a 880 rec = &stat->pages->records[stat->pages->index++];
493762fc 881 rec->ip = ip;
cafb168a 882 ftrace_add_profile(stat, rec);
493762fc 883
bac429f0 884 out:
cafb168a 885 atomic_dec(&stat->disabled);
bac429f0
SR
886
887 return rec;
888}
889
890static void
2f5f6ad9 891function_profile_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 892 struct ftrace_ops *ops, struct pt_regs *regs)
bac429f0 893{
cafb168a 894 struct ftrace_profile_stat *stat;
493762fc 895 struct ftrace_profile *rec;
bac429f0
SR
896 unsigned long flags;
897
898 if (!ftrace_profile_enabled)
899 return;
900
901 local_irq_save(flags);
cafb168a
SR
902
903 stat = &__get_cpu_var(ftrace_profile_stats);
0f6ce3de 904 if (!stat->hash || !ftrace_profile_enabled)
cafb168a
SR
905 goto out;
906
907 rec = ftrace_find_profiled_func(stat, ip);
493762fc 908 if (!rec) {
318e0a73 909 rec = ftrace_profile_alloc(stat, ip);
493762fc
SR
910 if (!rec)
911 goto out;
912 }
bac429f0
SR
913
914 rec->counter++;
915 out:
916 local_irq_restore(flags);
917}
918
0706f1c4
SR
919#ifdef CONFIG_FUNCTION_GRAPH_TRACER
920static int profile_graph_entry(struct ftrace_graph_ent *trace)
921{
a1e2e31d 922 function_profile_call(trace->func, 0, NULL, NULL);
0706f1c4
SR
923 return 1;
924}
925
926static void profile_graph_return(struct ftrace_graph_ret *trace)
927{
cafb168a 928 struct ftrace_profile_stat *stat;
a2a16d6a 929 unsigned long long calltime;
0706f1c4 930 struct ftrace_profile *rec;
cafb168a 931 unsigned long flags;
0706f1c4
SR
932
933 local_irq_save(flags);
cafb168a 934 stat = &__get_cpu_var(ftrace_profile_stats);
0f6ce3de 935 if (!stat->hash || !ftrace_profile_enabled)
cafb168a
SR
936 goto out;
937
37e44bc5
SR
938 /* If the calltime was zero'd ignore it */
939 if (!trace->calltime)
940 goto out;
941
a2a16d6a
SR
942 calltime = trace->rettime - trace->calltime;
943
944 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
945 int index;
946
947 index = trace->depth;
948
949 /* Append this call time to the parent time to subtract */
950 if (index)
951 current->ret_stack[index - 1].subtime += calltime;
952
953 if (current->ret_stack[index].subtime < calltime)
954 calltime -= current->ret_stack[index].subtime;
955 else
956 calltime = 0;
957 }
958
cafb168a 959 rec = ftrace_find_profiled_func(stat, trace->func);
e330b3bc 960 if (rec) {
a2a16d6a 961 rec->time += calltime;
e330b3bc
CD
962 rec->time_squared += calltime * calltime;
963 }
a2a16d6a 964
cafb168a 965 out:
0706f1c4
SR
966 local_irq_restore(flags);
967}
968
969static int register_ftrace_profiler(void)
970{
971 return register_ftrace_graph(&profile_graph_return,
972 &profile_graph_entry);
973}
974
975static void unregister_ftrace_profiler(void)
976{
977 unregister_ftrace_graph();
978}
979#else
bd38c0e6 980static struct ftrace_ops ftrace_profile_ops __read_mostly = {
fb9fb015 981 .func = function_profile_call,
f04f24fb
MH
982 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
983 INIT_REGEX_LOCK(ftrace_profile_ops)
bac429f0
SR
984};
985
0706f1c4
SR
986static int register_ftrace_profiler(void)
987{
988 return register_ftrace_function(&ftrace_profile_ops);
989}
990
991static void unregister_ftrace_profiler(void)
992{
993 unregister_ftrace_function(&ftrace_profile_ops);
994}
995#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
996
bac429f0
SR
997static ssize_t
998ftrace_profile_write(struct file *filp, const char __user *ubuf,
999 size_t cnt, loff_t *ppos)
1000{
1001 unsigned long val;
bac429f0
SR
1002 int ret;
1003
22fe9b54
PH
1004 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1005 if (ret)
bac429f0
SR
1006 return ret;
1007
1008 val = !!val;
1009
1010 mutex_lock(&ftrace_profile_lock);
1011 if (ftrace_profile_enabled ^ val) {
1012 if (val) {
493762fc
SR
1013 ret = ftrace_profile_init();
1014 if (ret < 0) {
1015 cnt = ret;
1016 goto out;
1017 }
1018
0706f1c4
SR
1019 ret = register_ftrace_profiler();
1020 if (ret < 0) {
1021 cnt = ret;
1022 goto out;
1023 }
bac429f0
SR
1024 ftrace_profile_enabled = 1;
1025 } else {
1026 ftrace_profile_enabled = 0;
0f6ce3de
SR
1027 /*
1028 * unregister_ftrace_profiler calls stop_machine
 1029 * so this acts like a synchronize_sched().
1030 */
0706f1c4 1031 unregister_ftrace_profiler();
bac429f0
SR
1032 }
1033 }
493762fc 1034 out:
bac429f0
SR
1035 mutex_unlock(&ftrace_profile_lock);
1036
cf8517cf 1037 *ppos += cnt;
bac429f0
SR
1038
1039 return cnt;
1040}
1041
493762fc
SR
1042static ssize_t
1043ftrace_profile_read(struct file *filp, char __user *ubuf,
1044 size_t cnt, loff_t *ppos)
1045{
fb9fb015 1046 char buf[64]; /* big enough to hold a number */
493762fc
SR
1047 int r;
1048
1049 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1050 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1051}
1052
bac429f0
SR
1053static const struct file_operations ftrace_profile_fops = {
1054 .open = tracing_open_generic,
1055 .read = ftrace_profile_read,
1056 .write = ftrace_profile_write,
6038f373 1057 .llseek = default_llseek,
bac429f0
SR
1058};
1059
cafb168a
SR
1060/* used to initialize the real stat files */
1061static struct tracer_stat function_stats __initdata = {
fb9fb015
SR
1062 .name = "functions",
1063 .stat_start = function_stat_start,
1064 .stat_next = function_stat_next,
1065 .stat_cmp = function_stat_cmp,
1066 .stat_headers = function_stat_headers,
1067 .stat_show = function_stat_show
cafb168a
SR
1068};
1069
6ab5d668 1070static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
bac429f0 1071{
cafb168a 1072 struct ftrace_profile_stat *stat;
bac429f0 1073 struct dentry *entry;
cafb168a 1074 char *name;
bac429f0 1075 int ret;
cafb168a
SR
1076 int cpu;
1077
1078 for_each_possible_cpu(cpu) {
1079 stat = &per_cpu(ftrace_profile_stats, cpu);
1080
1081 /* allocate enough for function name + cpu number */
1082 name = kmalloc(32, GFP_KERNEL);
1083 if (!name) {
1084 /*
1085 * The files created are permanent, if something happens
1086 * we still do not free memory.
1087 */
cafb168a
SR
1088 WARN(1,
1089 "Could not allocate stat file for cpu %d\n",
1090 cpu);
1091 return;
1092 }
1093 stat->stat = function_stats;
1094 snprintf(name, 32, "function%d", cpu);
1095 stat->stat.name = name;
1096 ret = register_stat_tracer(&stat->stat);
1097 if (ret) {
1098 WARN(1,
1099 "Could not register function stat for cpu %d\n",
1100 cpu);
1101 kfree(name);
1102 return;
1103 }
bac429f0
SR
1104 }
1105
1106 entry = debugfs_create_file("function_profile_enabled", 0644,
1107 d_tracer, NULL, &ftrace_profile_fops);
1108 if (!entry)
1109 pr_warning("Could not create debugfs "
1110 "'function_profile_enabled' entry\n");
1111}
1112
bac429f0 1113#else /* CONFIG_FUNCTION_PROFILER */
6ab5d668 1114static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
bac429f0
SR
1115{
1116}
bac429f0
SR
1117#endif /* CONFIG_FUNCTION_PROFILER */
1118
493762fc
SR
1119static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1120
1121#ifdef CONFIG_DYNAMIC_FTRACE
1122
1123#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1124# error Dynamic ftrace depends on MCOUNT_RECORD
1125#endif
1126
1127static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1128
1129struct ftrace_func_probe {
1130 struct hlist_node node;
1131 struct ftrace_probe_ops *ops;
1132 unsigned long flags;
1133 unsigned long ip;
1134 void *data;
7818b388 1135 struct list_head free_list;
493762fc
SR
1136};
1137
b448c4e3
SR
1138struct ftrace_func_entry {
1139 struct hlist_node hlist;
1140 unsigned long ip;
1141};
1142
1143struct ftrace_hash {
1144 unsigned long size_bits;
1145 struct hlist_head *buckets;
1146 unsigned long count;
07fd5515 1147 struct rcu_head rcu;
b448c4e3
SR
1148};
1149
33dc9b12
SR
1150/*
1151 * We make these constant because no one should touch them,
1152 * but they are used as the default "empty hash", to avoid allocating
1153 * it all the time. These are in a read only section such that if
1154 * anyone does try to modify it, it will cause an exception.
1155 */
1156static const struct hlist_head empty_buckets[1];
1157static const struct ftrace_hash empty_hash = {
1158 .buckets = (struct hlist_head *)empty_buckets,
1cf41dd7 1159};
33dc9b12 1160#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
493762fc 1161
2b499381 1162static struct ftrace_ops global_ops = {
f45948e8 1163 .func = ftrace_stub,
33dc9b12
SR
1164 .notrace_hash = EMPTY_HASH,
1165 .filter_hash = EMPTY_HASH,
f04f24fb
MH
1166 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1167 INIT_REGEX_LOCK(global_ops)
f45948e8
SR
1168};
1169
493762fc
SR
1170struct ftrace_page {
1171 struct ftrace_page *next;
a7900875 1172 struct dyn_ftrace *records;
493762fc 1173 int index;
a7900875 1174 int size;
493762fc
SR
1175};
1176
a7900875
SR
1177#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1178#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
493762fc
SR
1179
1180/* estimate from running different kernels */
1181#define NR_TO_INIT 10000
1182
1183static struct ftrace_page *ftrace_pages_start;
1184static struct ftrace_page *ftrace_pages;
1185
06a51d93
SR
1186static bool ftrace_hash_empty(struct ftrace_hash *hash)
1187{
1188 return !hash || !hash->count;
1189}
1190
b448c4e3
SR
1191static struct ftrace_func_entry *
1192ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1193{
1194 unsigned long key;
1195 struct ftrace_func_entry *entry;
1196 struct hlist_head *hhd;
b448c4e3 1197
06a51d93 1198 if (ftrace_hash_empty(hash))
b448c4e3
SR
1199 return NULL;
1200
1201 if (hash->size_bits > 0)
1202 key = hash_long(ip, hash->size_bits);
1203 else
1204 key = 0;
1205
1206 hhd = &hash->buckets[key];
1207
1bb539ca 1208 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
b448c4e3
SR
1209 if (entry->ip == ip)
1210 return entry;
1211 }
1212 return NULL;
1213}
1214
33dc9b12
SR
1215static void __add_hash_entry(struct ftrace_hash *hash,
1216 struct ftrace_func_entry *entry)
b448c4e3 1217{
b448c4e3
SR
1218 struct hlist_head *hhd;
1219 unsigned long key;
1220
b448c4e3 1221 if (hash->size_bits)
33dc9b12 1222 key = hash_long(entry->ip, hash->size_bits);
b448c4e3
SR
1223 else
1224 key = 0;
1225
b448c4e3
SR
1226 hhd = &hash->buckets[key];
1227 hlist_add_head(&entry->hlist, hhd);
1228 hash->count++;
33dc9b12
SR
1229}
1230
1231static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1232{
1233 struct ftrace_func_entry *entry;
1234
1235 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1236 if (!entry)
1237 return -ENOMEM;
1238
1239 entry->ip = ip;
1240 __add_hash_entry(hash, entry);
b448c4e3
SR
1241
1242 return 0;
1243}
1244
1245static void
33dc9b12 1246free_hash_entry(struct ftrace_hash *hash,
b448c4e3
SR
1247 struct ftrace_func_entry *entry)
1248{
1249 hlist_del(&entry->hlist);
1250 kfree(entry);
1251 hash->count--;
1252}
1253
33dc9b12
SR
1254static void
1255remove_hash_entry(struct ftrace_hash *hash,
1256 struct ftrace_func_entry *entry)
1257{
1258 hlist_del(&entry->hlist);
1259 hash->count--;
1260}
1261
b448c4e3
SR
1262static void ftrace_hash_clear(struct ftrace_hash *hash)
1263{
1264 struct hlist_head *hhd;
b67bfe0d 1265 struct hlist_node *tn;
b448c4e3
SR
1266 struct ftrace_func_entry *entry;
1267 int size = 1 << hash->size_bits;
1268 int i;
1269
33dc9b12
SR
1270 if (!hash->count)
1271 return;
1272
b448c4e3
SR
1273 for (i = 0; i < size; i++) {
1274 hhd = &hash->buckets[i];
b67bfe0d 1275 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
33dc9b12 1276 free_hash_entry(hash, entry);
b448c4e3
SR
1277 }
1278 FTRACE_WARN_ON(hash->count);
1279}
1280
33dc9b12
SR
1281static void free_ftrace_hash(struct ftrace_hash *hash)
1282{
1283 if (!hash || hash == EMPTY_HASH)
1284 return;
1285 ftrace_hash_clear(hash);
1286 kfree(hash->buckets);
1287 kfree(hash);
1288}
1289
07fd5515
SR
1290static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1291{
1292 struct ftrace_hash *hash;
1293
1294 hash = container_of(rcu, struct ftrace_hash, rcu);
1295 free_ftrace_hash(hash);
1296}
1297
1298static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1299{
1300 if (!hash || hash == EMPTY_HASH)
1301 return;
1302 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1303}
1304
5500fa51
JO
1305void ftrace_free_filter(struct ftrace_ops *ops)
1306{
f04f24fb 1307 ftrace_ops_init(ops);
5500fa51
JO
1308 free_ftrace_hash(ops->filter_hash);
1309 free_ftrace_hash(ops->notrace_hash);
1310}
1311
33dc9b12
SR
1312static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1313{
1314 struct ftrace_hash *hash;
1315 int size;
1316
1317 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1318 if (!hash)
1319 return NULL;
1320
1321 size = 1 << size_bits;
47b0edcb 1322 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
33dc9b12
SR
1323
1324 if (!hash->buckets) {
1325 kfree(hash);
1326 return NULL;
1327 }
1328
1329 hash->size_bits = size_bits;
1330
1331 return hash;
1332}
1333
1334static struct ftrace_hash *
1335alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1336{
1337 struct ftrace_func_entry *entry;
1338 struct ftrace_hash *new_hash;
33dc9b12
SR
1339 int size;
1340 int ret;
1341 int i;
1342
1343 new_hash = alloc_ftrace_hash(size_bits);
1344 if (!new_hash)
1345 return NULL;
1346
1347 /* Empty hash? */
06a51d93 1348 if (ftrace_hash_empty(hash))
33dc9b12
SR
1349 return new_hash;
1350
1351 size = 1 << hash->size_bits;
1352 for (i = 0; i < size; i++) {
b67bfe0d 1353 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
33dc9b12
SR
1354 ret = add_hash_entry(new_hash, entry->ip);
1355 if (ret < 0)
1356 goto free_hash;
1357 }
1358 }
1359
1360 FTRACE_WARN_ON(new_hash->count != hash->count);
1361
1362 return new_hash;
1363
1364 free_hash:
1365 free_ftrace_hash(new_hash);
1366 return NULL;
1367}
1368
41fb61c2
SR
1369static void
1370ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1371static void
1372ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1373
33dc9b12 1374static int
41fb61c2
SR
1375ftrace_hash_move(struct ftrace_ops *ops, int enable,
1376 struct ftrace_hash **dst, struct ftrace_hash *src)
33dc9b12
SR
1377{
1378 struct ftrace_func_entry *entry;
b67bfe0d 1379 struct hlist_node *tn;
33dc9b12 1380 struct hlist_head *hhd;
07fd5515
SR
1381 struct ftrace_hash *old_hash;
1382 struct ftrace_hash *new_hash;
33dc9b12
SR
1383 int size = src->count;
1384 int bits = 0;
41fb61c2 1385 int ret;
33dc9b12
SR
1386 int i;
1387
41fb61c2
SR
1388 /*
1389 * Remove the current set, update the hash and add
1390 * them back.
1391 */
1392 ftrace_hash_rec_disable(ops, enable);
1393
33dc9b12
SR
1394 /*
1395 * If the new source is empty, just free dst and assign it
1396 * the empty_hash.
1397 */
1398 if (!src->count) {
07fd5515
SR
1399 free_ftrace_hash_rcu(*dst);
1400 rcu_assign_pointer(*dst, EMPTY_HASH);
d4d34b98
SR
1401 /* still need to update the function records */
1402 ret = 0;
1403 goto out;
33dc9b12
SR
1404 }
1405
33dc9b12
SR
1406 /*
1407 * Make the hash size about 1/2 the # found
1408 */
1409 for (size /= 2; size; size >>= 1)
1410 bits++;
1411
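/*
 * For example, src->count == 100 gives bits = 6
 * (50 -> 25 -> 12 -> 6 -> 3 -> 1), i.e. a 64 bucket hash.
 */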
1412 /* Don't allocate too much */
1413 if (bits > FTRACE_HASH_MAX_BITS)
1414 bits = FTRACE_HASH_MAX_BITS;
1415
41fb61c2 1416 ret = -ENOMEM;
07fd5515
SR
1417 new_hash = alloc_ftrace_hash(bits);
1418 if (!new_hash)
41fb61c2 1419 goto out;
33dc9b12
SR
1420
1421 size = 1 << src->size_bits;
1422 for (i = 0; i < size; i++) {
1423 hhd = &src->buckets[i];
b67bfe0d 1424 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
33dc9b12 1425 remove_hash_entry(src, entry);
07fd5515 1426 __add_hash_entry(new_hash, entry);
33dc9b12
SR
1427 }
1428 }
1429
07fd5515
SR
1430 old_hash = *dst;
1431 rcu_assign_pointer(*dst, new_hash);
1432 free_ftrace_hash_rcu(old_hash);
1433
41fb61c2
SR
1434 ret = 0;
1435 out:
1436 /*
1437 * Enable regardless of ret:
1438 * On success, we enable the new hash.
1439 * On failure, we re-enable the original hash.
1440 */
1441 ftrace_hash_rec_enable(ops, enable);
1442
1443 return ret;
33dc9b12
SR
1444}
1445
b848914c
SR
1446/*
1447 * Test the hashes for this ops to see if we want to call
1448 * the ops->func or not.
1449 *
1450 * It's a match if the ip is in the ops->filter_hash or
1451 * the filter_hash does not exist or is empty,
1452 * AND
1453 * the ip is not in the ops->notrace_hash.
cdbe61bf
SR
1454 *
1455 * This needs to be called with preemption disabled as
1456 * the hashes are freed with call_rcu_sched().
b848914c
SR
1457 */
1458static int
195a8afc 1459ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
b848914c
SR
1460{
1461 struct ftrace_hash *filter_hash;
1462 struct ftrace_hash *notrace_hash;
1463 int ret;
1464
195a8afc
SRRH
1465#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1466 /*
1467 * There's a small race when adding ops that the ftrace handler
1468 * that wants regs, may be called without them. We can not
1469 * allow that handler to be called if regs is NULL.
1470 */
1471 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1472 return 0;
1473#endif
1474
1bb539ca
SR
1475 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1476 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
b848914c 1477
06a51d93 1478 if ((ftrace_hash_empty(filter_hash) ||
b848914c 1479 ftrace_lookup_ip(filter_hash, ip)) &&
06a51d93 1480 (ftrace_hash_empty(notrace_hash) ||
b848914c
SR
1481 !ftrace_lookup_ip(notrace_hash, ip)))
1482 ret = 1;
1483 else
1484 ret = 0;
b848914c
SR
1485
1486 return ret;
1487}
1488
493762fc
SR
1489/*
1490 * This is a double for. Do not use 'break' to break out of the loop,
1491 * you must use a goto.
1492 */
1493#define do_for_each_ftrace_rec(pg, rec) \
1494 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1495 int _____i; \
1496 for (_____i = 0; _____i < pg->index; _____i++) { \
1497 rec = &pg->records[_____i];
1498
1499#define while_for_each_ftrace_rec() \
1500 } \
1501 }
1502
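/*
 * Usage sketch (mirrors ftrace_replace_code() further below); use a
 * goto or return, never 'break', to leave the loop early:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		failed = __ftrace_replace_code(rec, enable);
 *		if (failed) {
 *			ftrace_bug(failed, rec->ip);
 *			return;
 *		}
 *	} while_for_each_ftrace_rec();
 */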
5855fead
SR
1503
1504static int ftrace_cmp_recs(const void *a, const void *b)
1505{
a650e02a
SR
1506 const struct dyn_ftrace *key = a;
1507 const struct dyn_ftrace *rec = b;
5855fead 1508
a650e02a 1509 if (key->flags < rec->ip)
5855fead 1510 return -1;
a650e02a
SR
1511 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1512 return 1;
5855fead
SR
1513 return 0;
1514}
1515
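/*
 * Note: ftrace_location_range() below passes a "key" whose ->ip is the
 * start of the searched range and whose ->flags is overloaded to hold
 * the inclusive end, so ftrace_cmp_recs() reports a match for any
 * record whose MCOUNT_INSN_SIZE instruction overlaps that range.
 */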
f0cf973a 1516static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
c88fd863
SR
1517{
1518 struct ftrace_page *pg;
1519 struct dyn_ftrace *rec;
5855fead 1520 struct dyn_ftrace key;
c88fd863 1521
a650e02a
SR
1522 key.ip = start;
1523 key.flags = end; /* overload flags, as it is unsigned long */
5855fead
SR
1524
1525 for (pg = ftrace_pages_start; pg; pg = pg->next) {
a650e02a
SR
1526 if (end < pg->records[0].ip ||
1527 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
9644302e 1528 continue;
5855fead
SR
1529 rec = bsearch(&key, pg->records, pg->index,
1530 sizeof(struct dyn_ftrace),
1531 ftrace_cmp_recs);
1532 if (rec)
f0cf973a 1533 return rec->ip;
5855fead 1534 }
c88fd863
SR
1535
1536 return 0;
1537}
1538
a650e02a
SR
1539/**
 1540 * ftrace_location - return true if the ip given is a traced location
1541 * @ip: the instruction pointer to check
1542 *
f0cf973a 1543 * Returns rec->ip if @ip given is a pointer to a ftrace location.
a650e02a
SR
1544 * That is, the instruction that is either a NOP or call to
1545 * the function tracer. It checks the ftrace internal tables to
1546 * determine if the address belongs or not.
1547 */
f0cf973a 1548unsigned long ftrace_location(unsigned long ip)
a650e02a
SR
1549{
1550 return ftrace_location_range(ip, ip);
1551}
1552
1553/**
1554 * ftrace_text_reserved - return true if range contains an ftrace location
1555 * @start: start of range to search
1556 * @end: end of range to search (inclusive). @end points to the last byte to check.
1557 *
 1558 * Returns 1 if the range from @start to @end contains a ftrace location.
1559 * That is, the instruction that is either a NOP or call to
1560 * the function tracer. It checks the ftrace internal tables to
1561 * determine if the address belongs or not.
1562 */
1563int ftrace_text_reserved(void *start, void *end)
1564{
f0cf973a
SR
1565 unsigned long ret;
1566
1567 ret = ftrace_location_range((unsigned long)start,
1568 (unsigned long)end);
1569
1570 return (int)!!ret;
a650e02a
SR
1571}
1572
ed926f9b
SR
1573static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1574 int filter_hash,
1575 bool inc)
1576{
1577 struct ftrace_hash *hash;
1578 struct ftrace_hash *other_hash;
1579 struct ftrace_page *pg;
1580 struct dyn_ftrace *rec;
1581 int count = 0;
1582 int all = 0;
1583
1584 /* Only update if the ops has been registered */
1585 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1586 return;
1587
1588 /*
1589 * In the filter_hash case:
1590 * If the count is zero, we update all records.
1591 * Otherwise we just update the items in the hash.
1592 *
1593 * In the notrace_hash case:
1594 * We enable the update in the hash.
1595 * As disabling notrace means enabling the tracing,
1596 * and enabling notrace means disabling, the inc variable
 1597 * gets inverted.
1598 */
1599 if (filter_hash) {
1600 hash = ops->filter_hash;
1601 other_hash = ops->notrace_hash;
06a51d93 1602 if (ftrace_hash_empty(hash))
ed926f9b
SR
1603 all = 1;
1604 } else {
1605 inc = !inc;
1606 hash = ops->notrace_hash;
1607 other_hash = ops->filter_hash;
1608 /*
1609 * If the notrace hash has no items,
1610 * then there's nothing to do.
1611 */
06a51d93 1612 if (ftrace_hash_empty(hash))
ed926f9b
SR
1613 return;
1614 }
1615
1616 do_for_each_ftrace_rec(pg, rec) {
1617 int in_other_hash = 0;
1618 int in_hash = 0;
1619 int match = 0;
1620
1621 if (all) {
1622 /*
1623 * Only the filter_hash affects all records.
1624 * Update if the record is not in the notrace hash.
1625 */
b848914c 1626 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
ed926f9b
SR
1627 match = 1;
1628 } else {
06a51d93
SR
1629 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1630 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
ed926f9b
SR
1631
 1632 /*
 1633 * A record matches if it is in this hash and, for the filter case, is not in the notrace hash; for the notrace case, the filter hash must also contain it or be empty.
 1634 */
1635 if (filter_hash && in_hash && !in_other_hash)
1636 match = 1;
1637 else if (!filter_hash && in_hash &&
06a51d93 1638 (in_other_hash || ftrace_hash_empty(other_hash)))
ed926f9b
SR
1639 match = 1;
1640 }
1641 if (!match)
1642 continue;
1643
1644 if (inc) {
1645 rec->flags++;
1646 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1647 return;
08f6fba5
SR
1648 /*
1649 * If any ops wants regs saved for this function
1650 * then all ops will get saved regs.
1651 */
1652 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1653 rec->flags |= FTRACE_FL_REGS;
ed926f9b
SR
1654 } else {
1655 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1656 return;
1657 rec->flags--;
1658 }
1659 count++;
1660 /* Shortcut, if we handled all records, we are done. */
1661 if (!all && count == hash->count)
1662 return;
1663 } while_for_each_ftrace_rec();
1664}
1665
1666static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1667 int filter_hash)
1668{
1669 __ftrace_hash_rec_update(ops, filter_hash, 0);
1670}
1671
1672static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1673 int filter_hash)
1674{
1675 __ftrace_hash_rec_update(ops, filter_hash, 1);
1676}
1677
b17e8a37
SR
1678static void print_ip_ins(const char *fmt, unsigned char *p)
1679{
1680 int i;
1681
1682 printk(KERN_CONT "%s", fmt);
1683
1684 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1685 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1686}
1687
c88fd863
SR
1688/**
1689 * ftrace_bug - report and shutdown function tracer
1690 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1691 * @ip: The address that failed
1692 *
1693 * The arch code that enables or disables the function tracing
1694 * can call ftrace_bug() when it has detected a problem in
1695 * modifying the code. @failed should be one of either:
1696 * EFAULT - if the problem happens on reading the @ip address
1697 * EINVAL - if what is read at @ip is not what was expected
 1699 * EPERM - if the problem happens on writing to the @ip address
1699 */
1700void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
1701{
1702 switch (failed) {
1703 case -EFAULT:
1704 FTRACE_WARN_ON_ONCE(1);
1705 pr_info("ftrace faulted on modifying ");
1706 print_ip_sym(ip);
1707 break;
1708 case -EINVAL:
1709 FTRACE_WARN_ON_ONCE(1);
1710 pr_info("ftrace failed to modify ");
1711 print_ip_sym(ip);
b17e8a37 1712 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
1713 printk(KERN_CONT "\n");
1714 break;
1715 case -EPERM:
1716 FTRACE_WARN_ON_ONCE(1);
1717 pr_info("ftrace faulted on writing ");
1718 print_ip_sym(ip);
1719 break;
1720 default:
1721 FTRACE_WARN_ON_ONCE(1);
1722 pr_info("ftrace faulted on unknown error ");
1723 print_ip_sym(ip);
1724 }
1725}
1726
c88fd863 1727static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
5072c59f 1728{
64fbcd16 1729 unsigned long flag = 0UL;
e7d3737e 1730
982c350b 1731 /*
30fb6aa7 1732 * If we are updating calls:
982c350b 1733 *
ed926f9b
SR
1734 * If the record has a ref count, then we need to enable it
1735 * because someone is using it.
982c350b 1736 *
ed926f9b
SR
1737 * Otherwise we make sure its disabled.
1738 *
30fb6aa7 1739 * If we are disabling calls, then disable all records that
ed926f9b 1740 * are enabled.
982c350b 1741 */
c88fd863 1742 if (enable && (rec->flags & ~FTRACE_FL_MASK))
ed926f9b 1743 flag = FTRACE_FL_ENABLED;
982c350b 1744
08f6fba5
SR
1745 /*
1746 * If enabling and the REGS flag does not match the REGS_EN, then
1747 * do not ignore this record. Set flags to fail the compare against
1748 * ENABLED.
1749 */
1750 if (flag &&
1751 (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1752 flag |= FTRACE_FL_REGS;
1753
64fbcd16
XG
1754 /* If the state of this record hasn't changed, then do nothing */
1755 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
c88fd863 1756 return FTRACE_UPDATE_IGNORE;
982c350b 1757
64fbcd16 1758 if (flag) {
08f6fba5
SR
1759 /* Save off if rec is being enabled (for return value) */
1760 flag ^= rec->flags & FTRACE_FL_ENABLED;
1761
1762 if (update) {
c88fd863 1763 rec->flags |= FTRACE_FL_ENABLED;
08f6fba5
SR
1764 if (flag & FTRACE_FL_REGS) {
1765 if (rec->flags & FTRACE_FL_REGS)
1766 rec->flags |= FTRACE_FL_REGS_EN;
1767 else
1768 rec->flags &= ~FTRACE_FL_REGS_EN;
1769 }
1770 }
1771
1772 /*
1773 * If this record is being updated from a nop, then
1774 * return UPDATE_MAKE_CALL.
1775 * Otherwise, if the EN flag is set, then return
1776 * UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1777 * from the non-save regs, to a save regs function.
1778 * Otherwise,
1779 * return UPDATE_MODIFY_CALL to tell the caller to convert
1780 * from the save regs, to a non-save regs function.
1781 */
1782 if (flag & FTRACE_FL_ENABLED)
1783 return FTRACE_UPDATE_MAKE_CALL;
1784 else if (rec->flags & FTRACE_FL_REGS_EN)
1785 return FTRACE_UPDATE_MODIFY_CALL_REGS;
1786 else
1787 return FTRACE_UPDATE_MODIFY_CALL;
c88fd863
SR
1788 }
1789
08f6fba5
SR
1790 if (update) {
1791 /* If there's no more users, clear all flags */
1792 if (!(rec->flags & ~FTRACE_FL_MASK))
1793 rec->flags = 0;
1794 else
1795 /* Just disable the record (keep REGS state) */
1796 rec->flags &= ~FTRACE_FL_ENABLED;
1797 }
c88fd863
SR
1798
1799 return FTRACE_UPDATE_MAKE_NOP;
1800}
1801
1802/**
1803 * ftrace_update_record, set a record that now is tracing or not
1804 * @rec: the record to update
1805 * @enable: set to 1 if the record is tracing, zero to force disable
1806 *
1807 * The records that represent all functions that can be traced need
1808 * to be updated when tracing has been enabled.
1809 */
1810int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1811{
1812 return ftrace_check_record(rec, enable, 1);
1813}
1814
1815/**
1816 * ftrace_test_record, check if the record has been enabled or not
1817 * @rec: the record to test
1818 * @enable: set to 1 to check if enabled, 0 if it is disabled
1819 *
1820 * The arch code may need to test if a record is already set to
1821 * tracing to determine how to modify the function code that it
1822 * represents.
1823 */
1824int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1825{
1826 return ftrace_check_record(rec, enable, 0);
1827}
1828
1829static int
1830__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1831{
08f6fba5 1832 unsigned long ftrace_old_addr;
c88fd863
SR
1833 unsigned long ftrace_addr;
1834 int ret;
1835
c88fd863
SR
1836 ret = ftrace_update_record(rec, enable);
1837
08f6fba5
SR
1838 if (rec->flags & FTRACE_FL_REGS)
1839 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1840 else
1841 ftrace_addr = (unsigned long)FTRACE_ADDR;
1842
c88fd863
SR
1843 switch (ret) {
1844 case FTRACE_UPDATE_IGNORE:
1845 return 0;
1846
1847 case FTRACE_UPDATE_MAKE_CALL:
64fbcd16 1848 return ftrace_make_call(rec, ftrace_addr);
c88fd863
SR
1849
1850 case FTRACE_UPDATE_MAKE_NOP:
1851 return ftrace_make_nop(NULL, rec, ftrace_addr);
08f6fba5
SR
1852
1853 case FTRACE_UPDATE_MODIFY_CALL_REGS:
1854 case FTRACE_UPDATE_MODIFY_CALL:
1855 if (rec->flags & FTRACE_FL_REGS)
1856 ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1857 else
1858 ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1859
1860 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
5072c59f
SR
1861 }
1862
c88fd863 1863 return -1; /* unknown ftrace bug */
5072c59f
SR
1864}
1865
e4f5d544 1866void __weak ftrace_replace_code(int enable)
3c1720f0 1867{
3c1720f0
SR
1868 struct dyn_ftrace *rec;
1869 struct ftrace_page *pg;
6a24a244 1870 int failed;
3c1720f0 1871
45a4a237
SR
1872 if (unlikely(ftrace_disabled))
1873 return;
1874
265c831c 1875 do_for_each_ftrace_rec(pg, rec) {
e4f5d544 1876 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 1877 if (failed) {
3279ba37
SR
1878 ftrace_bug(failed, rec->ip);
1879 /* Stop processing */
1880 return;
3c1720f0 1881 }
265c831c 1882 } while_for_each_ftrace_rec();
3c1720f0
SR
1883}
1884
c88fd863
SR
1885struct ftrace_rec_iter {
1886 struct ftrace_page *pg;
1887 int index;
1888};
1889
1890/**
1891 * ftrace_rec_iter_start, start up iterating over traced functions
1892 *
1893 * Returns an iterator handle that is used to iterate over all
1894 * the records that represent address locations where functions
1895 * are traced.
1896 *
1897 * May return NULL if no records are available.
1898 */
1899struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1900{
1901 /*
1902 * We only use a single iterator.
1903 * Protected by the ftrace_lock mutex.
1904 */
1905 static struct ftrace_rec_iter ftrace_rec_iter;
1906 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1907
1908 iter->pg = ftrace_pages_start;
1909 iter->index = 0;
1910
1911 /* Could have empty pages */
1912 while (iter->pg && !iter->pg->index)
1913 iter->pg = iter->pg->next;
1914
1915 if (!iter->pg)
1916 return NULL;
1917
1918 return iter;
1919}
1920
1921/**
1922 * ftrace_rec_iter_next, get the next record to process.
1923 * @iter: The handle to the iterator.
1924 *
1925 * Returns the next iterator after the given iterator @iter.
1926 */
1927struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1928{
1929 iter->index++;
1930
1931 if (iter->index >= iter->pg->index) {
1932 iter->pg = iter->pg->next;
1933 iter->index = 0;
1934
1935 /* Could have empty pages */
1936 while (iter->pg && !iter->pg->index)
1937 iter->pg = iter->pg->next;
1938 }
1939
1940 if (!iter->pg)
1941 return NULL;
1942
1943 return iter;
1944}
1945
1946/**
1947 * ftrace_rec_iter_record, get the record at the iterator location
1948 * @iter: The current iterator location
1949 *
1950 * Returns the record that the current @iter is at.
1951 */
1952struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1953{
1954 return &iter->pg->records[iter->index];
1955}
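/*
 * Usage sketch (illustrative only): an arch conversion routine can walk
 * every traced call site with the iterator above.  The iterator is a
 * single static instance, so this must run under ftrace_lock:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect or patch the call site at rec->ip
 *	}
 */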
1956
492a7ea5 1957static int
31e88909 1958ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
1959{
1960 unsigned long ip;
593eb8a2 1961 int ret;
3c1720f0
SR
1962
1963 ip = rec->ip;
1964
45a4a237
SR
1965 if (unlikely(ftrace_disabled))
1966 return 0;
1967
25aac9dc 1968 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 1969 if (ret) {
31e88909 1970 ftrace_bug(ret, ip);
492a7ea5 1971 return 0;
37ad5084 1972 }
492a7ea5 1973 return 1;
3c1720f0
SR
1974}
1975
000ab691
SR
1976/*
1977 * archs can override this function if they must do something
1978 * before the modifying code is performed.
1979 */
1980int __weak ftrace_arch_code_modify_prepare(void)
1981{
1982 return 0;
1983}
1984
1985/*
1986 * archs can override this function if they must do something
1987 * after the modifying code is performed.
1988 */
1989int __weak ftrace_arch_code_modify_post_process(void)
1990{
1991 return 0;
1992}
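/*
 * Sketch of an arch override (the helpers named below are assumptions,
 * not defined here): an architecture that write-protects kernel text
 * could open a patching window around the whole update:
 *
 *	int ftrace_arch_code_modify_prepare(void)
 *	{
 *		set_kernel_text_rw();		// assumed arch helper
 *		return 0;
 *	}
 *
 *	int ftrace_arch_code_modify_post_process(void)
 *	{
 *		set_kernel_text_ro();		// assumed arch helper
 *		return 0;
 *	}
 */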
1993
8ed3e2cf 1994void ftrace_modify_all_code(int command)
3d083395 1995{
59338f75
SRRH
1996 int update = command & FTRACE_UPDATE_TRACE_FUNC;
1997
1998 /*
 1999 * If the ftrace_caller calls an ftrace_ops func directly,
 2000 * we need to make sure that it only traces functions it
 2001 * expects to trace. When doing the switch of functions,
 2002 * we need to update to the ftrace_ops_list_func first
 2003 * before the transition between the old and new calls is set,
 2004 * as the ftrace_ops_list_func will check the ops hashes
 2005 * to make sure the ops have the right functions
 2006 * traced.
2007 */
2008 if (update)
2009 ftrace_update_ftrace_func(ftrace_ops_list_func);
2010
8ed3e2cf 2011 if (command & FTRACE_UPDATE_CALLS)
d61f82d0 2012 ftrace_replace_code(1);
8ed3e2cf 2013 else if (command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
2014 ftrace_replace_code(0);
2015
405e1d83
SRRH
2016 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2017 function_trace_op = set_function_trace_op;
2018 smp_wmb();
2019 /* If irqs are disabled, we are in stop machine */
2020 if (!irqs_disabled())
2021 smp_call_function(ftrace_sync_ipi, NULL, 1);
d61f82d0 2022 ftrace_update_ftrace_func(ftrace_trace_function);
405e1d83 2023 }
d61f82d0 2024
8ed3e2cf 2025 if (command & FTRACE_START_FUNC_RET)
5a45cfe1 2026 ftrace_enable_ftrace_graph_caller();
8ed3e2cf 2027 else if (command & FTRACE_STOP_FUNC_RET)
5a45cfe1 2028 ftrace_disable_ftrace_graph_caller();
8ed3e2cf
SR
2029}
2030
2031static int __ftrace_modify_code(void *data)
2032{
2033 int *command = data;
2034
2035 ftrace_modify_all_code(*command);
5a45cfe1 2036
d61f82d0 2037 return 0;
3d083395
SR
2038}
2039
c88fd863
SR
2040/**
2041 * ftrace_run_stop_machine, go back to the stop machine method
2042 * @command: The command to tell ftrace what to do
2043 *
 2044 * If an arch needs to fall back to the stop machine method, then
2045 * it can call this function.
2046 */
2047void ftrace_run_stop_machine(int command)
2048{
2049 stop_machine(__ftrace_modify_code, &command, NULL);
2050}
2051
2052/**
2053 * arch_ftrace_update_code, modify the code to trace or not trace
2054 * @command: The command that needs to be done
2055 *
 2056 * Archs can override this function if they do not need to
2057 * run stop_machine() to modify code.
2058 */
2059void __weak arch_ftrace_update_code(int command)
2060{
2061 ftrace_run_stop_machine(command);
2062}
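/*
 * Sketch (illustrative): an arch with its own safe live-patching
 * mechanism can override the weak function above and skip the
 * stop_machine() path entirely:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);	// no stop_machine() needed
 *	}
 *
 * (x86 does roughly this, relying on breakpoints in its own
 * ftrace_replace_code() to keep the modification safe.)
 */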
2063
e309b41d 2064static void ftrace_run_update_code(int command)
3d083395 2065{
000ab691
SR
2066 int ret;
2067
2068 ret = ftrace_arch_code_modify_prepare();
2069 FTRACE_WARN_ON(ret);
2070 if (ret)
2071 return;
c88fd863
SR
2072 /*
2073 * Do not call function tracer while we update the code.
2074 * We are in stop machine.
2075 */
2076 function_trace_stop++;
000ab691 2077
c88fd863
SR
2078 /*
2079 * By default we use stop_machine() to modify the code.
 2080 * But archs can do whatever they want as long as it
 2081 * is safe. stop_machine() is the safest, but also
2082 * produces the most overhead.
2083 */
2084 arch_ftrace_update_code(command);
2085
c88fd863 2086 function_trace_stop--;
000ab691
SR
2087
2088 ret = ftrace_arch_code_modify_post_process();
2089 FTRACE_WARN_ON(ret);
3d083395
SR
2090}
2091
d61f82d0 2092static ftrace_func_t saved_ftrace_func;
60a7ecf4 2093static int ftrace_start_up;
b848914c 2094static int global_start_up;
df4fc315
SR
2095
2096static void ftrace_startup_enable(int command)
2097{
2098 if (saved_ftrace_func != ftrace_trace_function) {
2099 saved_ftrace_func = ftrace_trace_function;
2100 command |= FTRACE_UPDATE_TRACE_FUNC;
2101 }
2102
2103 if (!command || !ftrace_enabled)
2104 return;
2105
2106 ftrace_run_update_code(command);
2107}
d61f82d0 2108
a1cd6173 2109static int ftrace_startup(struct ftrace_ops *ops, int command)
3d083395 2110{
b848914c 2111 bool hash_enable = true;
8a56d776 2112 int ret;
b848914c 2113
4eebcc81 2114 if (unlikely(ftrace_disabled))
a1cd6173 2115 return -ENODEV;
4eebcc81 2116
8a56d776
SRRH
2117 ret = __register_ftrace_function(ops);
2118 if (ret)
2119 return ret;
2120
60a7ecf4 2121 ftrace_start_up++;
30fb6aa7 2122 command |= FTRACE_UPDATE_CALLS;
d61f82d0 2123
b848914c
SR
2124 /* ops marked global share the filter hashes */
2125 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2126 ops = &global_ops;
2127 /* Don't update hash if global is already set */
2128 if (global_start_up)
2129 hash_enable = false;
2130 global_start_up++;
2131 }
2132
ed926f9b 2133 ops->flags |= FTRACE_OPS_FL_ENABLED;
b848914c 2134 if (hash_enable)
ed926f9b
SR
2135 ftrace_hash_rec_enable(ops, 1);
2136
df4fc315 2137 ftrace_startup_enable(command);
a1cd6173
SR
2138
2139 return 0;
3d083395
SR
2140}
2141
8a56d776 2142static int ftrace_shutdown(struct ftrace_ops *ops, int command)
3d083395 2143{
b848914c 2144 bool hash_disable = true;
8a56d776 2145 int ret;
b848914c 2146
4eebcc81 2147 if (unlikely(ftrace_disabled))
8a56d776
SRRH
2148 return -ENODEV;
2149
2150 ret = __unregister_ftrace_function(ops);
2151 if (ret)
2152 return ret;
4eebcc81 2153
60a7ecf4 2154 ftrace_start_up--;
9ea1a153
FW
2155 /*
 2156 * Just warn in case of imbalance; no need to kill ftrace. It's not
 2157 * critical, but the ftrace_call callers may never be nopped again after
2158 * further ftrace uses.
2159 */
2160 WARN_ON_ONCE(ftrace_start_up < 0);
2161
b848914c
SR
2162 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2163 ops = &global_ops;
2164 global_start_up--;
2165 WARN_ON_ONCE(global_start_up < 0);
2166 /* Don't update hash if global still has users */
2167 if (global_start_up) {
2168 WARN_ON_ONCE(!ftrace_start_up);
2169 hash_disable = false;
2170 }
2171 }
2172
2173 if (hash_disable)
ed926f9b
SR
2174 ftrace_hash_rec_disable(ops, 1);
2175
b848914c 2176 if (ops != &global_ops || !global_start_up)
ed926f9b 2177 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
b848914c 2178
30fb6aa7 2179 command |= FTRACE_UPDATE_CALLS;
3d083395 2180
d61f82d0
SR
2181 if (saved_ftrace_func != ftrace_trace_function) {
2182 saved_ftrace_func = ftrace_trace_function;
2183 command |= FTRACE_UPDATE_TRACE_FUNC;
2184 }
3d083395 2185
a4c35ed2
SRRH
2186 if (!command || !ftrace_enabled) {
2187 /*
2188 * If these are control ops, they still need their
 2189 * per_cpu field freed. Since function tracing is
2190 * not currently active, we can just free them
2191 * without synchronizing all CPUs.
2192 */
2193 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2194 control_ops_free(ops);
8a56d776 2195 return 0;
a4c35ed2 2196 }
d61f82d0
SR
2197
2198 ftrace_run_update_code(command);
a4c35ed2
SRRH
2199
2200 /*
2201 * Dynamic ops may be freed, we must make sure that all
2202 * callers are done before leaving this function.
2203 * The same goes for freeing the per_cpu data of the control
2204 * ops.
2205 *
2206 * Again, normal synchronize_sched() is not good enough.
2207 * We need to do a hard force of sched synchronization.
2208 * This is because we use preempt_disable() to do RCU, but
2209 * the function tracers can be called where RCU is not watching
2210 * (like before user_exit()). We can not rely on the RCU
2211 * infrastructure to do the synchronization, thus we must do it
2212 * ourselves.
2213 */
2214 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2215 schedule_on_each_cpu(ftrace_sync);
2216
2217 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2218 control_ops_free(ops);
2219 }
2220
8a56d776 2221 return 0;
3d083395
SR
2222}
2223
e309b41d 2224static void ftrace_startup_sysctl(void)
b0fc494f 2225{
4eebcc81
SR
2226 if (unlikely(ftrace_disabled))
2227 return;
2228
d61f82d0
SR
2229 /* Force update next time */
2230 saved_ftrace_func = NULL;
60a7ecf4
SR
2231 /* ftrace_start_up is true if we want ftrace running */
2232 if (ftrace_start_up)
30fb6aa7 2233 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
b0fc494f
SR
2234}
2235
e309b41d 2236static void ftrace_shutdown_sysctl(void)
b0fc494f 2237{
4eebcc81
SR
2238 if (unlikely(ftrace_disabled))
2239 return;
2240
60a7ecf4
SR
2241 /* ftrace_start_up is true if ftrace is running */
2242 if (ftrace_start_up)
79e406d7 2243 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
b0fc494f
SR
2244}
2245
3d083395 2246static cycle_t ftrace_update_time;
3d083395
SR
2247unsigned long ftrace_update_tot_cnt;
2248
8c4f3c3f 2249static inline int ops_traces_mod(struct ftrace_ops *ops)
f7bc8b61 2250{
8c4f3c3f
SRRH
2251 /*
 2252 * An empty filter_hash means the ops traces the module by default.
 2253 * But the notrace hash requires a test of individual module functions.
2254 */
2255 return ftrace_hash_empty(ops->filter_hash) &&
2256 ftrace_hash_empty(ops->notrace_hash);
2257}
2258
2259/*
2260 * Check if the current ops references the record.
2261 *
2262 * If the ops traces all functions, then it was already accounted for.
2263 * If the ops does not trace the current record function, skip it.
2264 * If the ops ignores the function via notrace filter, skip it.
2265 */
2266static inline bool
2267ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2268{
2269 /* If ops isn't enabled, ignore it */
2270 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2271 return 0;
2272
2273 /* If ops traces all mods, we already accounted for it */
2274 if (ops_traces_mod(ops))
2275 return 0;
2276
2277 /* The function must be in the filter */
2278 if (!ftrace_hash_empty(ops->filter_hash) &&
2279 !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2280 return 0;
f7bc8b61 2281
8c4f3c3f
SRRH
2282 /* If in notrace hash, we ignore it too */
2283 if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2284 return 0;
2285
2286 return 1;
2287}
2288
2289static int referenced_filters(struct dyn_ftrace *rec)
2290{
2291 struct ftrace_ops *ops;
2292 int cnt = 0;
2293
2294 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2295 if (ops_references_rec(ops, rec))
2296 cnt++;
2297 }
2298
2299 return cnt;
f7bc8b61
SR
2300}
2301
1dc43cf0 2302static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3d083395 2303{
85ae32ae 2304 struct ftrace_page *pg;
e94142a6 2305 struct dyn_ftrace *p;
f22f9a89 2306 cycle_t start, stop;
1dc43cf0 2307 unsigned long update_cnt = 0;
f7bc8b61 2308 unsigned long ref = 0;
8c4f3c3f 2309 bool test = false;
85ae32ae 2310 int i;
f7bc8b61
SR
2311
2312 /*
2313 * When adding a module, we need to check if tracers are
2314 * currently enabled and if they are set to trace all functions.
2315 * If they are, we need to enable the module functions as well
2316 * as update the reference counts for those function records.
2317 */
2318 if (mod) {
2319 struct ftrace_ops *ops;
2320
2321 for (ops = ftrace_ops_list;
2322 ops != &ftrace_list_end; ops = ops->next) {
8c4f3c3f
SRRH
2323 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2324 if (ops_traces_mod(ops))
2325 ref++;
2326 else
2327 test = true;
2328 }
f7bc8b61
SR
2329 }
2330 }
3d083395 2331
750ed1a4 2332 start = ftrace_now(raw_smp_processor_id());
3d083395 2333
1dc43cf0 2334 for (pg = new_pgs; pg; pg = pg->next) {
3d083395 2335
85ae32ae 2336 for (i = 0; i < pg->index; i++) {
8c4f3c3f
SRRH
2337 int cnt = ref;
2338
85ae32ae
SR
2339 /* If something went wrong, bail without enabling anything */
2340 if (unlikely(ftrace_disabled))
2341 return -1;
f22f9a89 2342
85ae32ae 2343 p = &pg->records[i];
8c4f3c3f
SRRH
2344 if (test)
2345 cnt += referenced_filters(p);
2346 p->flags = cnt;
f22f9a89 2347
85ae32ae
SR
2348 /*
2349 * Do the initial record conversion from mcount jump
2350 * to the NOP instructions.
2351 */
2352 if (!ftrace_code_disable(mod, p))
2353 break;
5cb084bb 2354
1dc43cf0 2355 update_cnt++;
5cb084bb 2356
85ae32ae
SR
2357 /*
2358 * If the tracing is enabled, go ahead and enable the record.
2359 *
 2360 * The reason not to enable the record immediately is the
2361 * inherent check of ftrace_make_nop/ftrace_make_call for
2362 * correct previous instructions. Making first the NOP
2363 * conversion puts the module to the correct state, thus
2364 * passing the ftrace_make_call check.
2365 */
8c4f3c3f 2366 if (ftrace_start_up && cnt) {
85ae32ae
SR
2367 int failed = __ftrace_replace_code(p, 1);
2368 if (failed)
2369 ftrace_bug(failed, p->ip);
2370 }
5cb084bb 2371 }
3d083395
SR
2372 }
2373
750ed1a4 2374 stop = ftrace_now(raw_smp_processor_id());
3d083395 2375 ftrace_update_time = stop - start;
1dc43cf0 2376 ftrace_update_tot_cnt += update_cnt;
3d083395 2377
16444a8a
ACM
2378 return 0;
2379}
2380
a7900875 2381static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3c1720f0 2382{
a7900875 2383 int order;
3c1720f0 2384 int cnt;
3c1720f0 2385
a7900875
SR
2386 if (WARN_ON(!count))
2387 return -EINVAL;
2388
2389 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3c1720f0
SR
2390
2391 /*
a7900875
SR
2392 * We want to fill as much as possible. No more than a page
2393 * may be empty.
3c1720f0 2394 */
a7900875
SR
2395 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2396 order--;
3c1720f0 2397
a7900875
SR
2398 again:
2399 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3c1720f0 2400
a7900875
SR
2401 if (!pg->records) {
2402 /* if we can't allocate this size, try something smaller */
2403 if (!order)
2404 return -ENOMEM;
2405 order >>= 1;
2406 goto again;
2407 }
3c1720f0 2408
a7900875
SR
2409 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2410 pg->size = cnt;
3c1720f0 2411
a7900875
SR
2412 if (cnt > count)
2413 cnt = count;
2414
2415 return cnt;
2416}
2417
2418static struct ftrace_page *
2419ftrace_allocate_pages(unsigned long num_to_init)
2420{
2421 struct ftrace_page *start_pg;
2422 struct ftrace_page *pg;
2423 int order;
2424 int cnt;
2425
2426 if (!num_to_init)
2427 return 0;
2428
2429 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2430 if (!pg)
2431 return NULL;
2432
2433 /*
 2434 * Try to allocate as much as possible in one contiguous
2435 * location that fills in all of the space. We want to
2436 * waste as little space as possible.
2437 */
2438 for (;;) {
2439 cnt = ftrace_allocate_records(pg, num_to_init);
2440 if (cnt < 0)
2441 goto free_pages;
2442
2443 num_to_init -= cnt;
2444 if (!num_to_init)
3c1720f0
SR
2445 break;
2446
a7900875
SR
2447 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2448 if (!pg->next)
2449 goto free_pages;
2450
3c1720f0
SR
2451 pg = pg->next;
2452 }
2453
a7900875
SR
2454 return start_pg;
2455
2456 free_pages:
2457 while (start_pg) {
2458 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2459 free_pages((unsigned long)pg->records, order);
2460 start_pg = pg->next;
2461 kfree(pg);
2462 pg = start_pg;
2463 }
2464 pr_info("ftrace: FAILED to allocate memory for functions\n");
2465 return NULL;
2466}
2467
5072c59f
SR
2468#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2469
2470struct ftrace_iterator {
98c4fd04 2471 loff_t pos;
4aeb6967
SR
2472 loff_t func_pos;
2473 struct ftrace_page *pg;
2474 struct dyn_ftrace *func;
2475 struct ftrace_func_probe *probe;
2476 struct trace_parser parser;
1cf41dd7 2477 struct ftrace_hash *hash;
33dc9b12 2478 struct ftrace_ops *ops;
4aeb6967
SR
2479 int hidx;
2480 int idx;
2481 unsigned flags;
5072c59f
SR
2482};
2483
8fc0c701 2484static void *
4aeb6967 2485t_hash_next(struct seq_file *m, loff_t *pos)
8fc0c701
SR
2486{
2487 struct ftrace_iterator *iter = m->private;
4aeb6967 2488 struct hlist_node *hnd = NULL;
8fc0c701
SR
2489 struct hlist_head *hhd;
2490
8fc0c701 2491 (*pos)++;
98c4fd04 2492 iter->pos = *pos;
8fc0c701 2493
4aeb6967
SR
2494 if (iter->probe)
2495 hnd = &iter->probe->node;
8fc0c701
SR
2496 retry:
2497 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2498 return NULL;
2499
2500 hhd = &ftrace_func_hash[iter->hidx];
2501
2502 if (hlist_empty(hhd)) {
2503 iter->hidx++;
2504 hnd = NULL;
2505 goto retry;
2506 }
2507
2508 if (!hnd)
2509 hnd = hhd->first;
2510 else {
2511 hnd = hnd->next;
2512 if (!hnd) {
2513 iter->hidx++;
2514 goto retry;
2515 }
2516 }
2517
4aeb6967
SR
2518 if (WARN_ON_ONCE(!hnd))
2519 return NULL;
2520
2521 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2522
2523 return iter;
8fc0c701
SR
2524}
2525
2526static void *t_hash_start(struct seq_file *m, loff_t *pos)
2527{
2528 struct ftrace_iterator *iter = m->private;
2529 void *p = NULL;
d82d6244
LZ
2530 loff_t l;
2531
69a3083c
SR
2532 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2533 return NULL;
2534
2bccfffd
SR
2535 if (iter->func_pos > *pos)
2536 return NULL;
8fc0c701 2537
d82d6244 2538 iter->hidx = 0;
2bccfffd 2539 for (l = 0; l <= (*pos - iter->func_pos); ) {
4aeb6967 2540 p = t_hash_next(m, &l);
d82d6244
LZ
2541 if (!p)
2542 break;
2543 }
4aeb6967
SR
2544 if (!p)
2545 return NULL;
2546
98c4fd04
SR
2547 /* Only set this if we have an item */
2548 iter->flags |= FTRACE_ITER_HASH;
2549
4aeb6967 2550 return iter;
8fc0c701
SR
2551}
2552
4aeb6967
SR
2553static int
2554t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
8fc0c701 2555{
b6887d79 2556 struct ftrace_func_probe *rec;
8fc0c701 2557
4aeb6967
SR
2558 rec = iter->probe;
2559 if (WARN_ON_ONCE(!rec))
2560 return -EIO;
8fc0c701 2561
809dcf29
SR
2562 if (rec->ops->print)
2563 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2564
b375a11a 2565 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
8fc0c701
SR
2566
2567 if (rec->data)
2568 seq_printf(m, ":%p", rec->data);
2569 seq_putc(m, '\n');
2570
2571 return 0;
2572}
2573
e309b41d 2574static void *
5072c59f
SR
2575t_next(struct seq_file *m, void *v, loff_t *pos)
2576{
2577 struct ftrace_iterator *iter = m->private;
fc13cb0c 2578 struct ftrace_ops *ops = iter->ops;
5072c59f
SR
2579 struct dyn_ftrace *rec = NULL;
2580
45a4a237
SR
2581 if (unlikely(ftrace_disabled))
2582 return NULL;
2583
8fc0c701 2584 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 2585 return t_hash_next(m, pos);
8fc0c701 2586
5072c59f 2587 (*pos)++;
1106b699 2588 iter->pos = iter->func_pos = *pos;
5072c59f 2589
0c75a3ed 2590 if (iter->flags & FTRACE_ITER_PRINTALL)
57c072c7 2591 return t_hash_start(m, pos);
0c75a3ed 2592
5072c59f
SR
2593 retry:
2594 if (iter->idx >= iter->pg->index) {
2595 if (iter->pg->next) {
2596 iter->pg = iter->pg->next;
2597 iter->idx = 0;
2598 goto retry;
2599 }
2600 } else {
2601 rec = &iter->pg->records[iter->idx++];
32082309 2602 if (((iter->flags & FTRACE_ITER_FILTER) &&
f45948e8 2603 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
0183fb1c 2604
41c52c0d 2605 ((iter->flags & FTRACE_ITER_NOTRACE) &&
647bcd03
SR
2606 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2607
2608 ((iter->flags & FTRACE_ITER_ENABLED) &&
23ea9c4d 2609 !(rec->flags & FTRACE_FL_ENABLED))) {
647bcd03 2610
5072c59f
SR
2611 rec = NULL;
2612 goto retry;
2613 }
2614 }
2615
4aeb6967 2616 if (!rec)
57c072c7 2617 return t_hash_start(m, pos);
4aeb6967
SR
2618
2619 iter->func = rec;
2620
2621 return iter;
5072c59f
SR
2622}
2623
98c4fd04
SR
2624static void reset_iter_read(struct ftrace_iterator *iter)
2625{
2626 iter->pos = 0;
2627 iter->func_pos = 0;
70f77b3f 2628 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
5072c59f
SR
2629}
2630
2631static void *t_start(struct seq_file *m, loff_t *pos)
2632{
2633 struct ftrace_iterator *iter = m->private;
fc13cb0c 2634 struct ftrace_ops *ops = iter->ops;
5072c59f 2635 void *p = NULL;
694ce0a5 2636 loff_t l;
5072c59f 2637
8fc0c701 2638 mutex_lock(&ftrace_lock);
45a4a237
SR
2639
2640 if (unlikely(ftrace_disabled))
2641 return NULL;
2642
98c4fd04
SR
2643 /*
2644 * If an lseek was done, then reset and start from beginning.
2645 */
2646 if (*pos < iter->pos)
2647 reset_iter_read(iter);
2648
0c75a3ed
SR
2649 /*
2650 * For set_ftrace_filter reading, if we have the filter
 2651 * off, we can take a shortcut and just print out that all
2652 * functions are enabled.
2653 */
06a51d93
SR
2654 if (iter->flags & FTRACE_ITER_FILTER &&
2655 ftrace_hash_empty(ops->filter_hash)) {
0c75a3ed 2656 if (*pos > 0)
8fc0c701 2657 return t_hash_start(m, pos);
0c75a3ed 2658 iter->flags |= FTRACE_ITER_PRINTALL;
df091625
CW
2659 /* reset in case of seek/pread */
2660 iter->flags &= ~FTRACE_ITER_HASH;
0c75a3ed
SR
2661 return iter;
2662 }
2663
8fc0c701
SR
2664 if (iter->flags & FTRACE_ITER_HASH)
2665 return t_hash_start(m, pos);
2666
98c4fd04
SR
2667 /*
2668 * Unfortunately, we need to restart at ftrace_pages_start
 2669 * every time we let go of ftrace_lock. This is because
2670 * those pointers can change without the lock.
2671 */
694ce0a5
LZ
2672 iter->pg = ftrace_pages_start;
2673 iter->idx = 0;
2674 for (l = 0; l <= *pos; ) {
2675 p = t_next(m, p, &l);
2676 if (!p)
2677 break;
50cdaf08 2678 }
5821e1b7 2679
69a3083c
SR
2680 if (!p)
2681 return t_hash_start(m, pos);
4aeb6967
SR
2682
2683 return iter;
5072c59f
SR
2684}
2685
2686static void t_stop(struct seq_file *m, void *p)
2687{
8fc0c701 2688 mutex_unlock(&ftrace_lock);
5072c59f
SR
2689}
2690
2691static int t_show(struct seq_file *m, void *v)
2692{
0c75a3ed 2693 struct ftrace_iterator *iter = m->private;
4aeb6967 2694 struct dyn_ftrace *rec;
5072c59f 2695
8fc0c701 2696 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 2697 return t_hash_show(m, iter);
8fc0c701 2698
0c75a3ed
SR
2699 if (iter->flags & FTRACE_ITER_PRINTALL) {
2700 seq_printf(m, "#### all functions enabled ####\n");
2701 return 0;
2702 }
2703
4aeb6967
SR
2704 rec = iter->func;
2705
5072c59f
SR
2706 if (!rec)
2707 return 0;
2708
647bcd03
SR
2709 seq_printf(m, "%ps", (void *)rec->ip);
2710 if (iter->flags & FTRACE_ITER_ENABLED)
08f6fba5
SR
2711 seq_printf(m, " (%ld)%s",
2712 rec->flags & ~FTRACE_FL_MASK,
2713 rec->flags & FTRACE_FL_REGS ? " R" : "");
647bcd03 2714 seq_printf(m, "\n");
5072c59f
SR
2715
2716 return 0;
2717}
2718
88e9d34c 2719static const struct seq_operations show_ftrace_seq_ops = {
5072c59f
SR
2720 .start = t_start,
2721 .next = t_next,
2722 .stop = t_stop,
2723 .show = t_show,
2724};
2725
e309b41d 2726static int
5072c59f
SR
2727ftrace_avail_open(struct inode *inode, struct file *file)
2728{
2729 struct ftrace_iterator *iter;
5072c59f 2730
4eebcc81
SR
2731 if (unlikely(ftrace_disabled))
2732 return -ENODEV;
2733
50e18b94
JO
2734 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2735 if (iter) {
2736 iter->pg = ftrace_pages_start;
2737 iter->ops = &global_ops;
4bf39a94 2738 }
5072c59f 2739
50e18b94 2740 return iter ? 0 : -ENOMEM;
5072c59f
SR
2741}
2742
647bcd03
SR
2743static int
2744ftrace_enabled_open(struct inode *inode, struct file *file)
2745{
2746 struct ftrace_iterator *iter;
647bcd03
SR
2747
2748 if (unlikely(ftrace_disabled))
2749 return -ENODEV;
2750
50e18b94
JO
2751 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2752 if (iter) {
2753 iter->pg = ftrace_pages_start;
2754 iter->flags = FTRACE_ITER_ENABLED;
2755 iter->ops = &global_ops;
647bcd03
SR
2756 }
2757
50e18b94 2758 return iter ? 0 : -ENOMEM;
647bcd03
SR
2759}
2760
1cf41dd7 2761static void ftrace_filter_reset(struct ftrace_hash *hash)
5072c59f 2762{
52baf119 2763 mutex_lock(&ftrace_lock);
1cf41dd7 2764 ftrace_hash_clear(hash);
52baf119 2765 mutex_unlock(&ftrace_lock);
5072c59f
SR
2766}
2767
fc13cb0c
SR
2768/**
2769 * ftrace_regex_open - initialize function tracer filter files
2770 * @ops: The ftrace_ops that hold the hash filters
2771 * @flag: The type of filter to process
2772 * @inode: The inode, usually passed in to your open routine
2773 * @file: The file, usually passed in to your open routine
2774 *
2775 * ftrace_regex_open() initializes the filter files for the
2776 * @ops. Depending on @flag it may process the filter hash or
2777 * the notrace hash of @ops. With this called from the open
2778 * routine, you can use ftrace_filter_write() for the write
2779 * routine if @flag has FTRACE_ITER_FILTER set, or
2780 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
098c879e 2781 * tracing_lseek() should be used as the lseek routine, and
fc13cb0c
SR
2782 * release must call ftrace_regex_release().
2783 */
2784int
f45948e8 2785ftrace_regex_open(struct ftrace_ops *ops, int flag,
1cf41dd7 2786 struct inode *inode, struct file *file)
5072c59f
SR
2787{
2788 struct ftrace_iterator *iter;
f45948e8 2789 struct ftrace_hash *hash;
5072c59f
SR
2790 int ret = 0;
2791
f04f24fb
MH
2792 ftrace_ops_init(ops);
2793
4eebcc81
SR
2794 if (unlikely(ftrace_disabled))
2795 return -ENODEV;
2796
5072c59f
SR
2797 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2798 if (!iter)
2799 return -ENOMEM;
2800
689fd8b6 2801 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2802 kfree(iter);
2803 return -ENOMEM;
2804 }
2805
3f2367ba
MH
2806 iter->ops = ops;
2807 iter->flags = flag;
2808
2809 mutex_lock(&ops->regex_lock);
2810
f45948e8
SR
2811 if (flag & FTRACE_ITER_NOTRACE)
2812 hash = ops->notrace_hash;
2813 else
2814 hash = ops->filter_hash;
2815
33dc9b12 2816 if (file->f_mode & FMODE_WRITE) {
33dc9b12 2817 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
33dc9b12
SR
2818 if (!iter->hash) {
2819 trace_parser_put(&iter->parser);
2820 kfree(iter);
3f2367ba
MH
2821 ret = -ENOMEM;
2822 goto out_unlock;
33dc9b12
SR
2823 }
2824 }
1cf41dd7 2825
5072c59f 2826 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 2827 (file->f_flags & O_TRUNC))
33dc9b12 2828 ftrace_filter_reset(iter->hash);
5072c59f
SR
2829
2830 if (file->f_mode & FMODE_READ) {
2831 iter->pg = ftrace_pages_start;
5072c59f
SR
2832
2833 ret = seq_open(file, &show_ftrace_seq_ops);
2834 if (!ret) {
2835 struct seq_file *m = file->private_data;
2836 m->private = iter;
79fe249c 2837 } else {
33dc9b12
SR
2838 /* Failed */
2839 free_ftrace_hash(iter->hash);
79fe249c 2840 trace_parser_put(&iter->parser);
5072c59f 2841 kfree(iter);
79fe249c 2842 }
5072c59f
SR
2843 } else
2844 file->private_data = iter;
3f2367ba
MH
2845
2846 out_unlock:
f04f24fb 2847 mutex_unlock(&ops->regex_lock);
5072c59f
SR
2848
2849 return ret;
2850}
2851
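/*
 * Usage sketch (illustrative; "my_ops" and the file names are
 * assumptions): a tracer that exposes its own filter file wires its
 * file_operations to the helpers documented above, just like the
 * ftrace_filter_fops defined later in this file:
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *					 inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= my_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= tracing_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 */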
41c52c0d
SR
2852static int
2853ftrace_filter_open(struct inode *inode, struct file *file)
2854{
e3b3e2e8
SRRH
2855 struct ftrace_ops *ops = inode->i_private;
2856
2857 return ftrace_regex_open(ops,
69a3083c
SR
2858 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2859 inode, file);
41c52c0d
SR
2860}
2861
2862static int
2863ftrace_notrace_open(struct inode *inode, struct file *file)
2864{
e3b3e2e8
SRRH
2865 struct ftrace_ops *ops = inode->i_private;
2866
2867 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
1cf41dd7 2868 inode, file);
41c52c0d
SR
2869}
2870
64e7c440 2871static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 2872{
9f4801e3 2873 int matched = 0;
751e9983 2874 int slen;
9f4801e3 2875
9f4801e3
SR
2876 switch (type) {
2877 case MATCH_FULL:
2878 if (strcmp(str, regex) == 0)
2879 matched = 1;
2880 break;
2881 case MATCH_FRONT_ONLY:
2882 if (strncmp(str, regex, len) == 0)
2883 matched = 1;
2884 break;
2885 case MATCH_MIDDLE_ONLY:
2886 if (strstr(str, regex))
2887 matched = 1;
2888 break;
2889 case MATCH_END_ONLY:
751e9983
LZ
2890 slen = strlen(str);
2891 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
9f4801e3
SR
2892 matched = 1;
2893 break;
2894 }
2895
2896 return matched;
2897}
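/*
 * Informational example: filter_parse_regex() turns the user's glob
 * into one of the match types handled above, roughly:
 *
 *	"func"		-> MATCH_FULL		exact string compare
 *	"func*"		-> MATCH_FRONT_ONLY	prefix compare
 *	"*func*"	-> MATCH_MIDDLE_ONLY	substring search
 *	"*func"		-> MATCH_END_ONLY	suffix compare
 */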
2898
b448c4e3 2899static int
1cf41dd7 2900enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
996e87be 2901{
b448c4e3 2902 struct ftrace_func_entry *entry;
b448c4e3
SR
2903 int ret = 0;
2904
1cf41dd7
SR
2905 entry = ftrace_lookup_ip(hash, rec->ip);
2906 if (not) {
2907 /* Do nothing if it doesn't exist */
2908 if (!entry)
2909 return 0;
b448c4e3 2910
33dc9b12 2911 free_hash_entry(hash, entry);
1cf41dd7
SR
2912 } else {
2913 /* Do nothing if it exists */
2914 if (entry)
2915 return 0;
b448c4e3 2916
1cf41dd7 2917 ret = add_hash_entry(hash, rec->ip);
b448c4e3
SR
2918 }
2919 return ret;
996e87be
SR
2920}
2921
64e7c440 2922static int
b9df92d2
SR
2923ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2924 char *regex, int len, int type)
64e7c440
SR
2925{
2926 char str[KSYM_SYMBOL_LEN];
b9df92d2
SR
2927 char *modname;
2928
2929 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2930
2931 if (mod) {
2932 /* module lookup requires matching the module */
2933 if (!modname || strcmp(modname, mod))
2934 return 0;
2935
2936 /* blank search means to match all funcs in the mod */
2937 if (!len)
2938 return 1;
2939 }
64e7c440 2940
64e7c440
SR
2941 return ftrace_match(str, regex, len, type);
2942}
2943
1cf41dd7
SR
2944static int
2945match_records(struct ftrace_hash *hash, char *buff,
2946 int len, char *mod, int not)
9f4801e3 2947{
b9df92d2 2948 unsigned search_len = 0;
9f4801e3
SR
2949 struct ftrace_page *pg;
2950 struct dyn_ftrace *rec;
b9df92d2
SR
2951 int type = MATCH_FULL;
2952 char *search = buff;
311d16da 2953 int found = 0;
b448c4e3 2954 int ret;
9f4801e3 2955
b9df92d2
SR
2956 if (len) {
2957 type = filter_parse_regex(buff, len, &search, &not);
2958 search_len = strlen(search);
2959 }
9f4801e3 2960
52baf119 2961 mutex_lock(&ftrace_lock);
265c831c 2962
b9df92d2
SR
2963 if (unlikely(ftrace_disabled))
2964 goto out_unlock;
9f4801e3 2965
265c831c 2966 do_for_each_ftrace_rec(pg, rec) {
b9df92d2 2967 if (ftrace_match_record(rec, mod, search, search_len, type)) {
1cf41dd7 2968 ret = enter_record(hash, rec, not);
b448c4e3
SR
2969 if (ret < 0) {
2970 found = ret;
2971 goto out_unlock;
2972 }
311d16da 2973 found = 1;
265c831c
SR
2974 }
2975 } while_for_each_ftrace_rec();
b9df92d2 2976 out_unlock:
52baf119 2977 mutex_unlock(&ftrace_lock);
311d16da
LZ
2978
2979 return found;
5072c59f
SR
2980}
2981
64e7c440 2982static int
1cf41dd7 2983ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
64e7c440 2984{
1cf41dd7 2985 return match_records(hash, buff, len, NULL, 0);
64e7c440
SR
2986}
2987
1cf41dd7
SR
2988static int
2989ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
64e7c440 2990{
64e7c440 2991 int not = 0;
6a24a244 2992
64e7c440
SR
2993 /* blank or '*' mean the same */
2994 if (strcmp(buff, "*") == 0)
2995 buff[0] = 0;
2996
2997 /* handle the case of 'dont filter this module' */
2998 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2999 buff[0] = 0;
3000 not = 1;
3001 }
3002
1cf41dd7 3003 return match_records(hash, buff, strlen(buff), mod, not);
64e7c440
SR
3004}
3005
f6180773
SR
3006/*
3007 * We register the module command as a template to show others how
 3008 * to register a command as well.
3009 */
3010
3011static int
43dd61c9
SR
3012ftrace_mod_callback(struct ftrace_hash *hash,
3013 char *func, char *cmd, char *param, int enable)
f6180773
SR
3014{
3015 char *mod;
b448c4e3 3016 int ret = -EINVAL;
f6180773
SR
3017
3018 /*
3019 * cmd == 'mod' because we only registered this func
3020 * for the 'mod' ftrace_func_command.
3021 * But if you register one func with multiple commands,
3022 * you can tell which command was used by the cmd
3023 * parameter.
3024 */
3025
3026 /* we must have a module name */
3027 if (!param)
b448c4e3 3028 return ret;
f6180773
SR
3029
3030 mod = strsep(&param, ":");
3031 if (!strlen(mod))
b448c4e3 3032 return ret;
f6180773 3033
1cf41dd7 3034 ret = ftrace_match_module_records(hash, func, mod);
b448c4e3
SR
3035 if (!ret)
3036 ret = -EINVAL;
3037 if (ret < 0)
3038 return ret;
3039
3040 return 0;
f6180773
SR
3041}
3042
3043static struct ftrace_func_command ftrace_mod_cmd = {
3044 .name = "mod",
3045 .func = ftrace_mod_callback,
3046};
3047
3048static int __init ftrace_mod_cmd_init(void)
3049{
3050 return register_ftrace_command(&ftrace_mod_cmd);
3051}
6f415672 3052core_initcall(ftrace_mod_cmd_init);
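/*
 * Sketch of another command registration (hypothetical "mycmd",
 * modeled on the mod template above; nothing below exists in the
 * kernel):
 *
 *	static int ftrace_mycmd_callback(struct ftrace_hash *hash,
 *					 char *func, char *cmd,
 *					 char *param, int enable)
 *	{
 *		// runs when "func:mycmd:param" is written to
 *		// set_ftrace_filter (enable=1) or set_ftrace_notrace
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command ftrace_mycmd = {
 *		.name	= "mycmd",
 *		.func	= ftrace_mycmd_callback,
 *	};
 *
 *	// from an __init function:
 *	register_ftrace_command(&ftrace_mycmd);
 */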
f6180773 3053
2f5f6ad9 3054static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 3055 struct ftrace_ops *op, struct pt_regs *pt_regs)
59df055f 3056{
b6887d79 3057 struct ftrace_func_probe *entry;
59df055f 3058 struct hlist_head *hhd;
59df055f 3059 unsigned long key;
59df055f
SR
3060
3061 key = hash_long(ip, FTRACE_HASH_BITS);
3062
3063 hhd = &ftrace_func_hash[key];
3064
3065 if (hlist_empty(hhd))
3066 return;
3067
3068 /*
 3069 * Disable preemption for these calls to prevent an RCU grace
3070 * period. This syncs the hash iteration and freeing of items
3071 * on the hash. rcu_read_lock is too dangerous here.
3072 */
5168ae50 3073 preempt_disable_notrace();
1bb539ca 3074 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
59df055f
SR
3075 if (entry->ip == ip)
3076 entry->ops->func(ip, parent_ip, &entry->data);
3077 }
5168ae50 3078 preempt_enable_notrace();
59df055f
SR
3079}
3080
b6887d79 3081static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 3082{
fb9fb015 3083 .func = function_trace_probe_call,
f04f24fb
MH
3084 .flags = FTRACE_OPS_FL_INITIALIZED,
3085 INIT_REGEX_LOCK(trace_probe_ops)
59df055f
SR
3086};
3087
b6887d79 3088static int ftrace_probe_registered;
59df055f 3089
b6887d79 3090static void __enable_ftrace_function_probe(void)
59df055f 3091{
b848914c 3092 int ret;
59df055f
SR
3093 int i;
3094
19dd603e
SRRH
3095 if (ftrace_probe_registered) {
3096 /* still need to update the function call sites */
3097 if (ftrace_enabled)
3098 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
59df055f 3099 return;
19dd603e 3100 }
59df055f
SR
3101
3102 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3103 struct hlist_head *hhd = &ftrace_func_hash[i];
3104 if (hhd->first)
3105 break;
3106 }
3107 /* Nothing registered? */
3108 if (i == FTRACE_FUNC_HASHSIZE)
3109 return;
3110
8a56d776 3111 ret = ftrace_startup(&trace_probe_ops, 0);
b848914c 3112
b6887d79 3113 ftrace_probe_registered = 1;
59df055f
SR
3114}
3115
b6887d79 3116static void __disable_ftrace_function_probe(void)
59df055f
SR
3117{
3118 int i;
3119
b6887d79 3120 if (!ftrace_probe_registered)
59df055f
SR
3121 return;
3122
3123 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3124 struct hlist_head *hhd = &ftrace_func_hash[i];
3125 if (hhd->first)
3126 return;
3127 }
3128
3129 /* no more funcs left */
8a56d776 3130 ftrace_shutdown(&trace_probe_ops, 0);
b848914c 3131
b6887d79 3132 ftrace_probe_registered = 0;
59df055f
SR
3133}
3134
3135
7818b388 3136static void ftrace_free_entry(struct ftrace_func_probe *entry)
59df055f 3137{
59df055f 3138 if (entry->ops->free)
e67efb93 3139 entry->ops->free(entry->ops, entry->ip, &entry->data);
59df055f
SR
3140 kfree(entry);
3141}
3142
59df055f 3143int
b6887d79 3144register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3145 void *data)
3146{
b6887d79 3147 struct ftrace_func_probe *entry;
e1df4cb6
SRRH
3148 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3149 struct ftrace_hash *hash;
59df055f
SR
3150 struct ftrace_page *pg;
3151 struct dyn_ftrace *rec;
59df055f 3152 int type, len, not;
6a24a244 3153 unsigned long key;
59df055f
SR
3154 int count = 0;
3155 char *search;
e1df4cb6 3156 int ret;
59df055f 3157
3f6fe06d 3158 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
3159 len = strlen(search);
3160
b6887d79 3161 /* we do not support '!' for function probes */
59df055f
SR
3162 if (WARN_ON(not))
3163 return -EINVAL;
3164
3f2367ba 3165 mutex_lock(&trace_probe_ops.regex_lock);
59df055f 3166
e1df4cb6
SRRH
3167 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3168 if (!hash) {
3169 count = -ENOMEM;
5ae0bf59 3170 goto out;
e1df4cb6
SRRH
3171 }
3172
3173 if (unlikely(ftrace_disabled)) {
3174 count = -ENODEV;
5ae0bf59 3175 goto out;
e1df4cb6 3176 }
59df055f 3177
5ae0bf59
SRRH
3178 mutex_lock(&ftrace_lock);
3179
45a4a237 3180 do_for_each_ftrace_rec(pg, rec) {
59df055f 3181
b9df92d2 3182 if (!ftrace_match_record(rec, NULL, search, len, type))
59df055f
SR
3183 continue;
3184
3185 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3186 if (!entry) {
b6887d79 3187 /* If we did not process any, then return error */
59df055f
SR
3188 if (!count)
3189 count = -ENOMEM;
3190 goto out_unlock;
3191 }
3192
3193 count++;
3194
3195 entry->data = data;
3196
3197 /*
3198 * The caller might want to do something special
3199 * for each function we find. We call the callback
3200 * to give the caller an opportunity to do so.
3201 */
e67efb93
SRRH
3202 if (ops->init) {
3203 if (ops->init(ops, rec->ip, &entry->data) < 0) {
59df055f
SR
3204 /* caller does not like this func */
3205 kfree(entry);
3206 continue;
3207 }
3208 }
3209
e1df4cb6
SRRH
3210 ret = enter_record(hash, rec, 0);
3211 if (ret < 0) {
3212 kfree(entry);
3213 count = ret;
3214 goto out_unlock;
3215 }
3216
59df055f
SR
3217 entry->ops = ops;
3218 entry->ip = rec->ip;
3219
3220 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3221 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3222
3223 } while_for_each_ftrace_rec();
e1df4cb6
SRRH
3224
3225 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3226 if (ret < 0)
3227 count = ret;
3228
b6887d79 3229 __enable_ftrace_function_probe();
59df055f
SR
3230
3231 out_unlock:
5ae0bf59
SRRH
3232 mutex_unlock(&ftrace_lock);
3233 out:
3f2367ba 3234 mutex_unlock(&trace_probe_ops.regex_lock);
e1df4cb6 3235 free_ftrace_hash(hash);
59df055f
SR
3236
3237 return count;
3238}
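/*
 * Usage sketch (illustrative; the ops and callback below are
 * assumptions): attach a probe to every function matching a glob, the
 * way "func:traceon"-style triggers do:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// called from function_trace_probe_call() on every hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	// returns the number of functions that matched, or a -errno
 *	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 */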
3239
3240enum {
b6887d79
SR
3241 PROBE_TEST_FUNC = 1,
3242 PROBE_TEST_DATA = 2
59df055f
SR
3243};
3244
3245static void
b6887d79 3246__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3247 void *data, int flags)
3248{
e1df4cb6 3249 struct ftrace_func_entry *rec_entry;
b6887d79 3250 struct ftrace_func_probe *entry;
7818b388 3251 struct ftrace_func_probe *p;
e1df4cb6 3252 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
7818b388 3253 struct list_head free_list;
e1df4cb6 3254 struct ftrace_hash *hash;
b67bfe0d 3255 struct hlist_node *tmp;
59df055f
SR
3256 char str[KSYM_SYMBOL_LEN];
3257 int type = MATCH_FULL;
3258 int i, len = 0;
3259 char *search;
3260
b36461da 3261 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
59df055f 3262 glob = NULL;
b36461da 3263 else if (glob) {
59df055f
SR
3264 int not;
3265
3f6fe06d 3266 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
3267 len = strlen(search);
3268
b6887d79 3269 /* we do not support '!' for function probes */
59df055f
SR
3270 if (WARN_ON(not))
3271 return;
3272 }
3273
3f2367ba 3274 mutex_lock(&trace_probe_ops.regex_lock);
e1df4cb6
SRRH
3275
3276 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3277 if (!hash)
3278 /* Hmm, should report this somehow */
3279 goto out_unlock;
3280
7818b388
SRRH
3281 INIT_LIST_HEAD(&free_list);
3282
59df055f
SR
3283 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3284 struct hlist_head *hhd = &ftrace_func_hash[i];
3285
b67bfe0d 3286 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
59df055f
SR
3287
3288 /* break up if statements for readability */
b6887d79 3289 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
3290 continue;
3291
b6887d79 3292 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
3293 continue;
3294
3295 /* do this last, since it is the most expensive */
3296 if (glob) {
3297 kallsyms_lookup(entry->ip, NULL, NULL,
3298 NULL, str);
3299 if (!ftrace_match(str, glob, len, type))
3300 continue;
3301 }
3302
e1df4cb6
SRRH
3303 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3304 /* It is possible more than one entry had this ip */
3305 if (rec_entry)
3306 free_hash_entry(hash, rec_entry);
3307
740466bc 3308 hlist_del_rcu(&entry->node);
7818b388 3309 list_add(&entry->free_list, &free_list);
59df055f
SR
3310 }
3311 }
3f2367ba 3312 mutex_lock(&ftrace_lock);
b6887d79 3313 __disable_ftrace_function_probe();
e1df4cb6
SRRH
3314 /*
3315 * Remove after the disable is called. Otherwise, if the last
3316 * probe is removed, a null hash means *all enabled*.
3317 */
3318 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
7818b388
SRRH
3319 synchronize_sched();
3320 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3321 list_del(&entry->free_list);
3322 ftrace_free_entry(entry);
3323 }
3f2367ba 3324 mutex_unlock(&ftrace_lock);
7818b388 3325
e1df4cb6 3326 out_unlock:
3f2367ba 3327 mutex_unlock(&trace_probe_ops.regex_lock);
e1df4cb6 3328 free_ftrace_hash(hash);
59df055f
SR
3329}
3330
3331void
b6887d79 3332unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3333 void *data)
3334{
b6887d79
SR
3335 __unregister_ftrace_function_probe(glob, ops, data,
3336 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
3337}
3338
3339void
b6887d79 3340unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 3341{
b6887d79 3342 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
3343}
3344
b6887d79 3345void unregister_ftrace_function_probe_all(char *glob)
59df055f 3346{
b6887d79 3347 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
3348}
3349
f6180773
SR
3350static LIST_HEAD(ftrace_commands);
3351static DEFINE_MUTEX(ftrace_cmd_mutex);
3352
38de93ab
TZ
3353/*
3354 * Currently we only register ftrace commands from __init, so mark this
3355 * __init too.
3356 */
3357__init int register_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
3358{
3359 struct ftrace_func_command *p;
3360 int ret = 0;
3361
3362 mutex_lock(&ftrace_cmd_mutex);
3363 list_for_each_entry(p, &ftrace_commands, list) {
3364 if (strcmp(cmd->name, p->name) == 0) {
3365 ret = -EBUSY;
3366 goto out_unlock;
3367 }
3368 }
3369 list_add(&cmd->list, &ftrace_commands);
3370 out_unlock:
3371 mutex_unlock(&ftrace_cmd_mutex);
3372
3373 return ret;
3374}
3375
38de93ab
TZ
3376/*
3377 * Currently we only unregister ftrace commands from __init, so mark
3378 * this __init too.
3379 */
3380__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
3381{
3382 struct ftrace_func_command *p, *n;
3383 int ret = -ENODEV;
3384
3385 mutex_lock(&ftrace_cmd_mutex);
3386 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3387 if (strcmp(cmd->name, p->name) == 0) {
3388 ret = 0;
3389 list_del_init(&p->list);
3390 goto out_unlock;
3391 }
3392 }
3393 out_unlock:
3394 mutex_unlock(&ftrace_cmd_mutex);
3395
3396 return ret;
3397}
3398
33dc9b12
SR
3399static int ftrace_process_regex(struct ftrace_hash *hash,
3400 char *buff, int len, int enable)
64e7c440 3401{
f6180773 3402 char *func, *command, *next = buff;
6a24a244 3403 struct ftrace_func_command *p;
0aff1c0c 3404 int ret = -EINVAL;
64e7c440
SR
3405
3406 func = strsep(&next, ":");
3407
3408 if (!next) {
1cf41dd7 3409 ret = ftrace_match_records(hash, func, len);
b448c4e3
SR
3410 if (!ret)
3411 ret = -EINVAL;
3412 if (ret < 0)
3413 return ret;
3414 return 0;
64e7c440
SR
3415 }
3416
f6180773 3417 /* command found */
64e7c440
SR
3418
3419 command = strsep(&next, ":");
3420
f6180773
SR
3421 mutex_lock(&ftrace_cmd_mutex);
3422 list_for_each_entry(p, &ftrace_commands, list) {
3423 if (strcmp(p->name, command) == 0) {
43dd61c9 3424 ret = p->func(hash, func, command, next, enable);
f6180773
SR
3425 goto out_unlock;
3426 }
64e7c440 3427 }
f6180773
SR
3428 out_unlock:
3429 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 3430
f6180773 3431 return ret;
64e7c440
SR
3432}
3433
e309b41d 3434static ssize_t
41c52c0d
SR
3435ftrace_regex_write(struct file *file, const char __user *ubuf,
3436 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
3437{
3438 struct ftrace_iterator *iter;
689fd8b6 3439 struct trace_parser *parser;
3440 ssize_t ret, read;
5072c59f 3441
4ba7978e 3442 if (!cnt)
5072c59f
SR
3443 return 0;
3444
5072c59f
SR
3445 if (file->f_mode & FMODE_READ) {
3446 struct seq_file *m = file->private_data;
3447 iter = m->private;
3448 } else
3449 iter = file->private_data;
3450
f04f24fb 3451 if (unlikely(ftrace_disabled))
3f2367ba
MH
3452 return -ENODEV;
3453
3454 /* iter->hash is a local copy, so we don't need regex_lock */
f04f24fb 3455
689fd8b6 3456 parser = &iter->parser;
3457 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 3458
4ba7978e 3459 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 3460 !trace_parser_cont(parser)) {
33dc9b12 3461 ret = ftrace_process_regex(iter->hash, parser->buffer,
689fd8b6 3462 parser->idx, enable);
313254a9 3463 trace_parser_clear(parser);
7c088b51 3464 if (ret < 0)
3f2367ba 3465 goto out;
eda1e328 3466 }
5072c59f 3467
5072c59f 3468 ret = read;
3f2367ba 3469 out:
5072c59f
SR
3470 return ret;
3471}
3472
fc13cb0c 3473ssize_t
41c52c0d
SR
3474ftrace_filter_write(struct file *file, const char __user *ubuf,
3475 size_t cnt, loff_t *ppos)
3476{
3477 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3478}
3479
fc13cb0c 3480ssize_t
41c52c0d
SR
3481ftrace_notrace_write(struct file *file, const char __user *ubuf,
3482 size_t cnt, loff_t *ppos)
3483{
3484 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3485}
3486
33dc9b12 3487static int
647664ea
MH
3488ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3489{
3490 struct ftrace_func_entry *entry;
3491
3492 if (!ftrace_location(ip))
3493 return -EINVAL;
3494
3495 if (remove) {
3496 entry = ftrace_lookup_ip(hash, ip);
3497 if (!entry)
3498 return -ENOENT;
3499 free_hash_entry(hash, entry);
3500 return 0;
3501 }
3502
3503 return add_hash_entry(hash, ip);
3504}
3505
1c80c432
SRRH
3506static void ftrace_ops_update_code(struct ftrace_ops *ops)
3507{
3508 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3509 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3510}
3511
647664ea
MH
3512static int
3513ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3514 unsigned long ip, int remove, int reset, int enable)
41c52c0d 3515{
33dc9b12 3516 struct ftrace_hash **orig_hash;
f45948e8 3517 struct ftrace_hash *hash;
33dc9b12 3518 int ret;
f45948e8 3519
936e074b
SR
 3520 /* All global ops use the global ops filters */
3521 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3522 ops = &global_ops;
3523
41c52c0d 3524 if (unlikely(ftrace_disabled))
33dc9b12 3525 return -ENODEV;
41c52c0d 3526
3f2367ba
MH
3527 mutex_lock(&ops->regex_lock);
3528
f45948e8 3529 if (enable)
33dc9b12 3530 orig_hash = &ops->filter_hash;
f45948e8 3531 else
33dc9b12
SR
3532 orig_hash = &ops->notrace_hash;
3533
3534 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3f2367ba
MH
3535 if (!hash) {
3536 ret = -ENOMEM;
3537 goto out_regex_unlock;
3538 }
f45948e8 3539
41c52c0d 3540 if (reset)
1cf41dd7 3541 ftrace_filter_reset(hash);
ac483c44
JO
3542 if (buf && !ftrace_match_records(hash, buf, len)) {
3543 ret = -EINVAL;
3544 goto out_regex_unlock;
3545 }
647664ea
MH
3546 if (ip) {
3547 ret = ftrace_match_addr(hash, ip, remove);
3548 if (ret < 0)
3549 goto out_regex_unlock;
3550 }
33dc9b12
SR
3551
3552 mutex_lock(&ftrace_lock);
41fb61c2 3553 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
1c80c432
SRRH
3554 if (!ret)
3555 ftrace_ops_update_code(ops);
072126f4 3556
33dc9b12
SR
3557 mutex_unlock(&ftrace_lock);
3558
ac483c44 3559 out_regex_unlock:
f04f24fb 3560 mutex_unlock(&ops->regex_lock);
33dc9b12
SR
3561
3562 free_ftrace_hash(hash);
3563 return ret;
41c52c0d
SR
3564}
3565
647664ea
MH
3566static int
3567ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3568 int reset, int enable)
3569{
3570 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3571}
3572
3573/**
3574 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3575 * @ops - the ops to set the filter with
3576 * @ip - the address to add to or remove from the filter.
3577 * @remove - non zero to remove the ip from the filter
3578 * @reset - non zero to reset all filters before applying this filter.
3579 *
 3580 * Filters denote which functions should be enabled when tracing is enabled.
 3581 * If @ip is NULL, it fails to update the filter.
3582 */
3583int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3584 int remove, int reset)
3585{
f04f24fb 3586 ftrace_ops_init(ops);
647664ea
MH
3587 return ftrace_set_addr(ops, ip, remove, reset, 1);
3588}
3589EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
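/*
 * Usage sketch (illustrative; "my_ops" and the looked-up symbol are
 * assumptions): trace exactly one function by address before
 * registering the ops:
 *
 *	unsigned long ip = kallsyms_lookup_name("do_fork");
 *	int ret;
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);	// reset, then add ip
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */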
3590
3591static int
3592ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3593 int reset, int enable)
3594{
3595 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3596}
3597
77a2b37d
SR
3598/**
3599 * ftrace_set_filter - set a function to filter on in ftrace
936e074b
SR
3600 * @ops - the ops to set the filter with
3601 * @buf - the string that holds the function filter text.
3602 * @len - the length of the string.
3603 * @reset - non zero to reset all filters before applying this filter.
3604 *
3605 * Filters denote which functions should be enabled when tracing is enabled.
3606 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3607 */
ac483c44 3608int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
3609 int len, int reset)
3610{
f04f24fb 3611 ftrace_ops_init(ops);
ac483c44 3612 return ftrace_set_regex(ops, buf, len, reset, 1);
936e074b
SR
3613}
3614EXPORT_SYMBOL_GPL(ftrace_set_filter);
3615
3616/**
3617 * ftrace_set_notrace - set a function to not trace in ftrace
3618 * @ops - the ops to set the notrace filter with
3619 * @buf - the string that holds the function notrace text.
3620 * @len - the length of the string.
3621 * @reset - non zero to reset all filters before applying this filter.
3622 *
3623 * Notrace Filters denote which functions should not be enabled when tracing
3624 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3625 * for tracing.
3626 */
ac483c44 3627int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
3628 int len, int reset)
3629{
f04f24fb 3630 ftrace_ops_init(ops);
ac483c44 3631 return ftrace_set_regex(ops, buf, len, reset, 0);
936e074b
SR
3632}
3633EXPORT_SYMBOL_GPL(ftrace_set_notrace);
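/*
 * Usage sketch (illustrative; "my_ops" and the globs are assumptions):
 * restrict an ops to a set of functions and carve one out before
 * registering it:
 *
 *	ftrace_set_filter(&my_ops, "ext4_*", strlen("ext4_*"), 1);
 *	ftrace_set_notrace(&my_ops, "ext4_sync_fs",
 *			   strlen("ext4_sync_fs"), 0);
 *	register_ftrace_function(&my_ops);
 */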
3634/**
 3635 * ftrace_set_global_filter - set a function to filter on with the global tracers
77a2b37d
SR
3637 * @buf - the string that holds the function filter text.
3638 * @len - the length of the string.
3639 * @reset - non zero to reset all filters before applying this filter.
3640 *
3641 * Filters denote which functions should be enabled when tracing is enabled.
3642 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3643 */
936e074b 3644void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
77a2b37d 3645{
f45948e8 3646 ftrace_set_regex(&global_ops, buf, len, reset, 1);
41c52c0d 3647}
936e074b 3648EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4eebcc81 3649
41c52c0d
SR
3650/**
 3651 * ftrace_set_global_notrace - set a function to not trace with the global tracers
41c52c0d
SR
3653 * @buf - the string that holds the function notrace text.
3654 * @len - the length of the string.
3655 * @reset - non zero to reset all filters before applying this filter.
3656 *
3657 * Notrace Filters denote which functions should not be enabled when tracing
3658 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3659 * for tracing.
3660 */
936e074b 3661void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
41c52c0d 3662{
f45948e8 3663 ftrace_set_regex(&global_ops, buf, len, reset, 0);
77a2b37d 3664}
936e074b 3665EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
77a2b37d 3666
2af15d6a
SR
3667/*
3668 * command line interface to allow users to set filters on boot up.
3669 */
3670#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3671static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3672static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3673
f1ed7c74
SRRH
3674/* Used by function selftest to not test if filter is set */
3675bool ftrace_filter_param __initdata;
3676
2af15d6a
SR
3677static int __init set_ftrace_notrace(char *str)
3678{
f1ed7c74 3679 ftrace_filter_param = true;
75761cc1 3680 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
3681 return 1;
3682}
3683__setup("ftrace_notrace=", set_ftrace_notrace);
3684
3685static int __init set_ftrace_filter(char *str)
3686{
f1ed7c74 3687 ftrace_filter_param = true;
75761cc1 3688 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
3689 return 1;
3690}
3691__setup("ftrace_filter=", set_ftrace_filter);
3692
369bc18f 3693#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 3694static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
faf982a6 3695static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
801c29fd 3696
369bc18f
SA
3697static int __init set_graph_function(char *str)
3698{
06f43d66 3699 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
3700 return 1;
3701}
3702__setup("ftrace_graph_filter=", set_graph_function);
3703
3704static void __init set_ftrace_early_graph(char *buf)
3705{
3706 int ret;
3707 char *func;
3708
3709 while (buf) {
3710 func = strsep(&buf, ",");
3711 /* we allow only one expression at a time */
3712 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
faf982a6 3713 FTRACE_GRAPH_MAX_FUNCS, func);
369bc18f
SA
3714 if (ret)
3715 printk(KERN_DEBUG "ftrace: function %s not "
3716 "traceable\n", func);
3717 }
3718}
3719#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3720
2a85a37f
SR
3721void __init
3722ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2af15d6a
SR
3723{
3724 char *func;
3725
f04f24fb
MH
3726 ftrace_ops_init(ops);
3727
2af15d6a
SR
3728 while (buf) {
3729 func = strsep(&buf, ",");
f45948e8 3730 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2af15d6a
SR
3731 }
3732}
3733
3734static void __init set_ftrace_early_filters(void)
3735{
3736 if (ftrace_filter_buf[0])
2a85a37f 3737 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
2af15d6a 3738 if (ftrace_notrace_buf[0])
2a85a37f 3739 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
369bc18f
SA
3740#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3741 if (ftrace_graph_buf[0])
3742 set_ftrace_early_graph(ftrace_graph_buf);
3743#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
3744}
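/*
 * Informational example: the buffers consumed above are filled from
 * kernel command line parameters such as
 *
 *	ftrace_filter=sys_open,sys_close ftrace_notrace=rcu_read_lock
 *	ftrace_graph_filter=do_IRQ
 *
 * which take effect before the corresponding debugfs files exist.
 */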
3745
fc13cb0c 3746int ftrace_regex_release(struct inode *inode, struct file *file)
5072c59f
SR
3747{
3748 struct seq_file *m = (struct seq_file *)file->private_data;
3749 struct ftrace_iterator *iter;
33dc9b12 3750 struct ftrace_hash **orig_hash;
689fd8b6 3751 struct trace_parser *parser;
ed926f9b 3752 int filter_hash;
33dc9b12 3753 int ret;
5072c59f 3754
5072c59f
SR
3755 if (file->f_mode & FMODE_READ) {
3756 iter = m->private;
5072c59f
SR
3757 seq_release(inode, file);
3758 } else
3759 iter = file->private_data;
3760
689fd8b6 3761 parser = &iter->parser;
3762 if (trace_parser_loaded(parser)) {
3763 parser->buffer[parser->idx] = 0;
1cf41dd7 3764 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5072c59f
SR
3765 }
3766
689fd8b6 3767 trace_parser_put(parser);
689fd8b6 3768
3f2367ba
MH
3769 mutex_lock(&iter->ops->regex_lock);
3770
058e297d 3771 if (file->f_mode & FMODE_WRITE) {
ed926f9b
SR
3772 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3773
3774 if (filter_hash)
33dc9b12 3775 orig_hash = &iter->ops->filter_hash;
ed926f9b
SR
3776 else
3777 orig_hash = &iter->ops->notrace_hash;
33dc9b12 3778
058e297d 3779 mutex_lock(&ftrace_lock);
41fb61c2
SR
3780 ret = ftrace_hash_move(iter->ops, filter_hash,
3781 orig_hash, iter->hash);
1c80c432
SRRH
3782 if (!ret)
3783 ftrace_ops_update_code(iter->ops);
41fb61c2 3784
058e297d
SR
3785 mutex_unlock(&ftrace_lock);
3786 }
3f2367ba
MH
3787
3788 mutex_unlock(&iter->ops->regex_lock);
33dc9b12
SR
3789 free_ftrace_hash(iter->hash);
3790 kfree(iter);
058e297d 3791
5072c59f
SR
3792 return 0;
3793}
3794
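/*
 * Illustrative userspace sketch (not part of this file; the debugfs
 * mount point is an assumption): appending one filter expression to
 * set_ftrace_filter. The write adds the pattern to the open iterator's
 * hash, and the final close() reaches ftrace_regex_release() above,
 * which is what actually commits the new hash to the ops.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *expr = "schedule\n";	/* example pattern only */
	int fd;

	/* No O_TRUNC: the currently set filter is kept and extended. */
	fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, expr, strlen(expr)) < 0)
		perror("write");
	close(fd);
	return 0;
}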
5e2336a0 3795static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
3796 .open = ftrace_avail_open,
3797 .read = seq_read,
3798 .llseek = seq_lseek,
3be04b47 3799 .release = seq_release_private,
5072c59f
SR
3800};
3801
647bcd03
SR
3802static const struct file_operations ftrace_enabled_fops = {
3803 .open = ftrace_enabled_open,
3804 .read = seq_read,
3805 .llseek = seq_lseek,
3806 .release = seq_release_private,
3807};
3808
5e2336a0 3809static const struct file_operations ftrace_filter_fops = {
5072c59f 3810 .open = ftrace_filter_open,
850a80cf 3811 .read = seq_read,
5072c59f 3812 .write = ftrace_filter_write,
098c879e 3813 .llseek = tracing_lseek,
1cf41dd7 3814 .release = ftrace_regex_release,
5072c59f
SR
3815};
3816
5e2336a0 3817static const struct file_operations ftrace_notrace_fops = {
41c52c0d 3818 .open = ftrace_notrace_open,
850a80cf 3819 .read = seq_read,
41c52c0d 3820 .write = ftrace_notrace_write,
098c879e 3821 .llseek = tracing_lseek,
1cf41dd7 3822 .release = ftrace_regex_release,
41c52c0d
SR
3823};
3824
ea4e2bc4
SR
3825#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3826
3827static DEFINE_MUTEX(graph_lock);
3828
3829int ftrace_graph_count;
29ad23b0 3830int ftrace_graph_notrace_count;
ea4e2bc4 3831unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
29ad23b0 3832unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
ea4e2bc4 3833
faf982a6
NK
3834struct ftrace_graph_data {
3835 unsigned long *table;
3836 size_t size;
3837 int *count;
3838 const struct seq_operations *seq_ops;
3839};
3840
ea4e2bc4 3841static void *
85951842 3842__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 3843{
faf982a6
NK
3844 struct ftrace_graph_data *fgd = m->private;
3845
3846 if (*pos >= *fgd->count)
ea4e2bc4 3847 return NULL;
faf982a6 3848 return &fgd->table[*pos];
85951842 3849}
ea4e2bc4 3850
85951842
LZ
3851static void *
3852g_next(struct seq_file *m, void *v, loff_t *pos)
3853{
3854 (*pos)++;
3855 return __g_next(m, pos);
ea4e2bc4
SR
3856}
3857
3858static void *g_start(struct seq_file *m, loff_t *pos)
3859{
faf982a6
NK
3860 struct ftrace_graph_data *fgd = m->private;
3861
ea4e2bc4
SR
3862 mutex_lock(&graph_lock);
3863
f9349a8f 3864 /* Nothing set; tell g_show to print that all functions are enabled */
faf982a6 3865 if (!*fgd->count && !*pos)
f9349a8f
FW
3866 return (void *)1;
3867
85951842 3868 return __g_next(m, pos);
ea4e2bc4
SR
3869}
3870
3871static void g_stop(struct seq_file *m, void *p)
3872{
3873 mutex_unlock(&graph_lock);
3874}
3875
3876static int g_show(struct seq_file *m, void *v)
3877{
3878 unsigned long *ptr = v;
ea4e2bc4
SR
3879
3880 if (!ptr)
3881 return 0;
3882
f9349a8f
FW
3883 if (ptr == (unsigned long *)1) {
3884 seq_printf(m, "#### all functions enabled ####\n");
3885 return 0;
3886 }
3887
b375a11a 3888 seq_printf(m, "%ps\n", (void *)*ptr);
ea4e2bc4
SR
3889
3890 return 0;
3891}
3892
88e9d34c 3893static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
3894 .start = g_start,
3895 .next = g_next,
3896 .stop = g_stop,
3897 .show = g_show,
3898};
3899
3900static int
faf982a6
NK
3901__ftrace_graph_open(struct inode *inode, struct file *file,
3902 struct ftrace_graph_data *fgd)
ea4e2bc4
SR
3903{
3904 int ret = 0;
3905
ea4e2bc4
SR
3906 mutex_lock(&graph_lock);
3907 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 3908 (file->f_flags & O_TRUNC)) {
faf982a6
NK
3909 *fgd->count = 0;
3910 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
ea4e2bc4 3911 }
a4ec5e0c 3912 mutex_unlock(&graph_lock);
ea4e2bc4 3913
faf982a6
NK
3914 if (file->f_mode & FMODE_READ) {
3915 ret = seq_open(file, fgd->seq_ops);
3916 if (!ret) {
3917 struct seq_file *m = file->private_data;
3918 m->private = fgd;
3919 }
3920 } else
3921 file->private_data = fgd;
ea4e2bc4
SR
3922
3923 return ret;
3924}
3925
faf982a6
NK
3926static int
3927ftrace_graph_open(struct inode *inode, struct file *file)
3928{
3929 struct ftrace_graph_data *fgd;
3930
3931 if (unlikely(ftrace_disabled))
3932 return -ENODEV;
3933
3934 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3935 if (fgd == NULL)
3936 return -ENOMEM;
3937
3938 fgd->table = ftrace_graph_funcs;
3939 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3940 fgd->count = &ftrace_graph_count;
3941 fgd->seq_ops = &ftrace_graph_seq_ops;
3942
3943 return __ftrace_graph_open(inode, file, fgd);
3944}
3945
29ad23b0
NK
3946static int
3947ftrace_graph_notrace_open(struct inode *inode, struct file *file)
3948{
3949 struct ftrace_graph_data *fgd;
3950
3951 if (unlikely(ftrace_disabled))
3952 return -ENODEV;
3953
3954 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3955 if (fgd == NULL)
3956 return -ENOMEM;
3957
3958 fgd->table = ftrace_graph_notrace_funcs;
3959 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3960 fgd->count = &ftrace_graph_notrace_count;
3961 fgd->seq_ops = &ftrace_graph_seq_ops;
3962
3963 return __ftrace_graph_open(inode, file, fgd);
3964}
3965
87827111
LZ
3966static int
3967ftrace_graph_release(struct inode *inode, struct file *file)
3968{
faf982a6
NK
3969 if (file->f_mode & FMODE_READ) {
3970 struct seq_file *m = file->private_data;
3971
3972 kfree(m->private);
87827111 3973 seq_release(inode, file);
faf982a6
NK
3974 } else {
3975 kfree(file->private_data);
3976 }
3977
87827111
LZ
3978 return 0;
3979}
3980
ea4e2bc4 3981static int
faf982a6 3982ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
ea4e2bc4 3983{
ea4e2bc4
SR
3984 struct dyn_ftrace *rec;
3985 struct ftrace_page *pg;
f9349a8f 3986 int search_len;
c7c6b1fe 3987 int fail = 1;
f9349a8f
FW
3988 int type, not;
3989 char *search;
3990 bool exists;
3991 int i;
ea4e2bc4 3992
f9349a8f 3993 /* decode regex */
3f6fe06d 3994 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
faf982a6 3995 if (!not && *idx >= size)
c7c6b1fe 3996 return -EBUSY;
f9349a8f
FW
3997
3998 search_len = strlen(search);
3999
52baf119 4000 mutex_lock(&ftrace_lock);
45a4a237
SR
4001
4002 if (unlikely(ftrace_disabled)) {
4003 mutex_unlock(&ftrace_lock);
4004 return -ENODEV;
4005 }
4006
265c831c
SR
4007 do_for_each_ftrace_rec(pg, rec) {
4008
b9df92d2 4009 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
c7c6b1fe 4010 /* if it is in the array */
f9349a8f 4011 exists = false;
c7c6b1fe 4012 for (i = 0; i < *idx; i++) {
f9349a8f
FW
4013 if (array[i] == rec->ip) {
4014 exists = true;
265c831c
SR
4015 break;
4016 }
c7c6b1fe
LZ
4017 }
4018
4019 if (!not) {
4020 fail = 0;
4021 if (!exists) {
4022 array[(*idx)++] = rec->ip;
faf982a6 4023 if (*idx >= size)
c7c6b1fe
LZ
4024 goto out;
4025 }
4026 } else {
4027 if (exists) {
4028 array[i] = array[--(*idx)];
4029 array[*idx] = 0;
4030 fail = 0;
4031 }
4032 }
ea4e2bc4 4033 }
265c831c 4034 } while_for_each_ftrace_rec();
c7c6b1fe 4035out:
52baf119 4036 mutex_unlock(&ftrace_lock);
ea4e2bc4 4037
c7c6b1fe
LZ
4038 if (fail)
4039 return -EINVAL;
4040
c7c6b1fe 4041 return 0;
ea4e2bc4
SR
4042}
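/*
 * Standalone sketch (plain C, an assumption, not kernel code) of the
 * add/remove scheme used by ftrace_set_func() above: an entry is
 * removed by overwriting its slot with the last element, so the array
 * stays dense without shifting anything.
 */
static int graph_array_toggle(unsigned long *array, int *idx, int size,
			      unsigned long ip, int remove)
{
	int i;

	for (i = 0; i < *idx; i++)
		if (array[i] == ip)
			break;

	if (!remove) {
		if (i < *idx)
			return 0;		/* already present */
		if (*idx >= size)
			return -1;		/* table is full */
		array[(*idx)++] = ip;
		return 0;
	}

	if (i == *idx)
		return -1;			/* not found */
	array[i] = array[--(*idx)];
	array[*idx] = 0;
	return 0;
}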
4043
4044static ssize_t
4045ftrace_graph_write(struct file *file, const char __user *ubuf,
4046 size_t cnt, loff_t *ppos)
4047{
689fd8b6 4048 struct trace_parser parser;
6a10108b 4049 ssize_t read, ret = 0;
faf982a6 4050 struct ftrace_graph_data *fgd = file->private_data;
ea4e2bc4 4051
c7c6b1fe 4052 if (!cnt)
ea4e2bc4
SR
4053 return 0;
4054
6a10108b
NK
4055 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4056 return -ENOMEM;
ea4e2bc4 4057
689fd8b6 4058 read = trace_get_user(&parser, ubuf, cnt, ppos);
ea4e2bc4 4059
4ba7978e 4060 if (read >= 0 && trace_parser_loaded((&parser))) {
689fd8b6 4061 parser.buffer[parser.idx] = 0;
4062
6a10108b
NK
4063 mutex_lock(&graph_lock);
4064
689fd8b6 4065 /* we allow only one expression at a time */
faf982a6
NK
4066 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4067 parser.buffer);
6a10108b
NK
4068
4069 mutex_unlock(&graph_lock);
ea4e2bc4 4070 }
ea4e2bc4 4071
6a10108b
NK
4072 if (!ret)
4073 ret = read;
1eb90f13 4074
689fd8b6 4075 trace_parser_put(&parser);
ea4e2bc4
SR
4076
4077 return ret;
4078}
4079
4080static const struct file_operations ftrace_graph_fops = {
87827111
LZ
4081 .open = ftrace_graph_open,
4082 .read = seq_read,
4083 .write = ftrace_graph_write,
098c879e 4084 .llseek = tracing_lseek,
87827111 4085 .release = ftrace_graph_release,
ea4e2bc4 4086};
29ad23b0
NK
4087
4088static const struct file_operations ftrace_graph_notrace_fops = {
4089 .open = ftrace_graph_notrace_open,
4090 .read = seq_read,
4091 .write = ftrace_graph_write,
098c879e 4092 .llseek = tracing_lseek,
29ad23b0
NK
4093 .release = ftrace_graph_release,
4094};
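/*
 * These two files mirror set_ftrace_filter/set_ftrace_notrace for the
 * function graph tracer: writing a pattern adds matching functions to
 * ftrace_graph_funcs (or ftrace_graph_notrace_funcs), opening with
 * O_TRUNC clears the table, and reading lists the current entries or
 * "#### all functions enabled ####" when the table is empty.
 */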
ea4e2bc4
SR
4095#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4096
591dffda
SRRH
4097void ftrace_create_filter_files(struct ftrace_ops *ops,
4098 struct dentry *parent)
4099{
4100
4101 trace_create_file("set_ftrace_filter", 0644, parent,
4102 ops, &ftrace_filter_fops);
4103
4104 trace_create_file("set_ftrace_notrace", 0644, parent,
4105 ops, &ftrace_notrace_fops);
4106}
4107
4108/*
4109 * The name "destroy_filter_files" is really a misnomer. It may
4110 * actually delete the files in the future, but for now it is
4111 * really intended to make sure the ops passed in are disabled
4112 * and that when this function returns, the caller is free to
4113 * free the ops.
4114 *
4115 * The "destroy" name is only to match the "create" name that this
4116 * should be paired with.
4117 */
4118void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4119{
4120 mutex_lock(&ftrace_lock);
4121 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4122 ftrace_shutdown(ops, 0);
4123 ops->flags |= FTRACE_OPS_FL_DELETED;
4124 mutex_unlock(&ftrace_lock);
4125}
4126
df4fc315 4127static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 4128{
5072c59f 4129
5452af66
FW
4130 trace_create_file("available_filter_functions", 0444,
4131 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 4132
647bcd03
SR
4133 trace_create_file("enabled_functions", 0444,
4134 d_tracer, NULL, &ftrace_enabled_fops);
4135
591dffda 4136 ftrace_create_filter_files(&global_ops, d_tracer);
ad90c0e3 4137
ea4e2bc4 4138#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5452af66 4139 trace_create_file("set_graph_function", 0444, d_tracer,
ea4e2bc4
SR
4140 NULL,
4141 &ftrace_graph_fops);
29ad23b0
NK
4142 trace_create_file("set_graph_notrace", 0444, d_tracer,
4143 NULL,
4144 &ftrace_graph_notrace_fops);
ea4e2bc4
SR
4145#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4146
5072c59f
SR
4147 return 0;
4148}
4149
9fd49328 4150static int ftrace_cmp_ips(const void *a, const void *b)
68950619 4151{
9fd49328
SR
4152 const unsigned long *ipa = a;
4153 const unsigned long *ipb = b;
68950619 4154
9fd49328
SR
4155 if (*ipa > *ipb)
4156 return 1;
4157 if (*ipa < *ipb)
4158 return -1;
4159 return 0;
4160}
4161
4162static void ftrace_swap_ips(void *a, void *b, int size)
4163{
4164 unsigned long *ipa = a;
4165 unsigned long *ipb = b;
4166 unsigned long t;
4167
4168 t = *ipa;
4169 *ipa = *ipb;
4170 *ipb = t;
68950619
SR
4171}
4172
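/*
 * ftrace_process_locs() below sorts the raw mcount addresses with these
 * helpers before turning them into dyn_ftrace records, so each page
 * ends up in address order and later lookups (e.g. ftrace_location())
 * can binary-search a page instead of scanning it.
 */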
5cb084bb 4173static int ftrace_process_locs(struct module *mod,
31e88909 4174 unsigned long *start,
68bf21aa
SR
4175 unsigned long *end)
4176{
706c81f8 4177 struct ftrace_page *start_pg;
a7900875 4178 struct ftrace_page *pg;
706c81f8 4179 struct dyn_ftrace *rec;
a7900875 4180 unsigned long count;
68bf21aa
SR
4181 unsigned long *p;
4182 unsigned long addr;
4376cac6 4183 unsigned long flags = 0; /* Shut up gcc */
a7900875
SR
4184 int ret = -ENOMEM;
4185
4186 count = end - start;
4187
4188 if (!count)
4189 return 0;
4190
9fd49328
SR
4191 sort(start, count, sizeof(*start),
4192 ftrace_cmp_ips, ftrace_swap_ips);
4193
706c81f8
SR
4194 start_pg = ftrace_allocate_pages(count);
4195 if (!start_pg)
a7900875 4196 return -ENOMEM;
68bf21aa 4197
e6ea44e9 4198 mutex_lock(&ftrace_lock);
a7900875 4199
32082309
SR
4200 /*
4201 * Core and each module needs their own pages, as
4202 * modules will free them when they are removed.
4203 * Force a new page to be allocated for modules.
4204 */
a7900875
SR
4205 if (!mod) {
4206 WARN_ON(ftrace_pages || ftrace_pages_start);
4207 /* First initialization */
706c81f8 4208 ftrace_pages = ftrace_pages_start = start_pg;
a7900875 4209 } else {
32082309 4210 if (!ftrace_pages)
a7900875 4211 goto out;
32082309 4212
a7900875
SR
4213 if (WARN_ON(ftrace_pages->next)) {
4214 /* Hmm, we have free pages? */
4215 while (ftrace_pages->next)
4216 ftrace_pages = ftrace_pages->next;
32082309 4217 }
a7900875 4218
706c81f8 4219 ftrace_pages->next = start_pg;
32082309
SR
4220 }
4221
68bf21aa 4222 p = start;
706c81f8 4223 pg = start_pg;
68bf21aa
SR
4224 while (p < end) {
4225 addr = ftrace_call_adjust(*p++);
20e5227e
SR
4226 /*
4227 * Some architecture linkers will pad between
4228 * the different mcount_loc sections of different
4229 * object files to satisfy alignments.
4230 * Skip any NULL pointers.
4231 */
4232 if (!addr)
4233 continue;
706c81f8
SR
4234
4235 if (pg->index == pg->size) {
4236 /* We should have allocated enough */
4237 if (WARN_ON(!pg->next))
4238 break;
4239 pg = pg->next;
4240 }
4241
4242 rec = &pg->records[pg->index++];
4243 rec->ip = addr;
68bf21aa
SR
4244 }
4245
706c81f8
SR
4246 /* We should have used all pages */
4247 WARN_ON(pg->next);
4248
4249 /* Assign the last page to ftrace_pages */
4250 ftrace_pages = pg;
4251
a4f18ed1 4252 /*
4376cac6
SR
4253 * We only need to disable interrupts on start up
4254 * because we are modifying code that an interrupt
4255 * may execute, and the modification is not atomic.
4256 * But for modules, nothing runs the code we modify
4257 * until we are finished with it, and there's no
4258 * reason to cause large interrupt latencies while we do it.
a4f18ed1 4259 */
4376cac6
SR
4260 if (!mod)
4261 local_irq_save(flags);
1dc43cf0 4262 ftrace_update_code(mod, start_pg);
4376cac6
SR
4263 if (!mod)
4264 local_irq_restore(flags);
a7900875
SR
4265 ret = 0;
4266 out:
e6ea44e9 4267 mutex_unlock(&ftrace_lock);
68bf21aa 4268
a7900875 4269 return ret;
68bf21aa
SR
4270}
4271
93eb677d 4272#ifdef CONFIG_MODULES
32082309
SR
4273
4274#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4275
e7247a15 4276void ftrace_release_mod(struct module *mod)
93eb677d
SR
4277{
4278 struct dyn_ftrace *rec;
32082309 4279 struct ftrace_page **last_pg;
93eb677d 4280 struct ftrace_page *pg;
a7900875 4281 int order;
93eb677d 4282
45a4a237
SR
4283 mutex_lock(&ftrace_lock);
4284
e7247a15 4285 if (ftrace_disabled)
45a4a237 4286 goto out_unlock;
93eb677d 4287
32082309
SR
4288 /*
4289 * Each module has its own ftrace_pages, remove
4290 * them from the list.
4291 */
4292 last_pg = &ftrace_pages_start;
4293 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4294 rec = &pg->records[0];
e7247a15 4295 if (within_module_core(rec->ip, mod)) {
93eb677d 4296 /*
32082309
SR
4297 * As core pages are first, the first
4298 * page should never be a module page.
93eb677d 4299 */
32082309
SR
4300 if (WARN_ON(pg == ftrace_pages_start))
4301 goto out_unlock;
4302
4303 /* Check if we are deleting the last page */
4304 if (pg == ftrace_pages)
4305 ftrace_pages = next_to_ftrace_page(last_pg);
4306
4307 *last_pg = pg->next;
a7900875
SR
4308 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4309 free_pages((unsigned long)pg->records, order);
4310 kfree(pg);
32082309
SR
4311 } else
4312 last_pg = &pg->next;
4313 }
45a4a237 4314 out_unlock:
93eb677d
SR
4315 mutex_unlock(&ftrace_lock);
4316}
4317
4318static void ftrace_init_module(struct module *mod,
4319 unsigned long *start, unsigned long *end)
90d595fe 4320{
00fd61ae 4321 if (ftrace_disabled || start == end)
fed1939c 4322 return;
5cb084bb 4323 ftrace_process_locs(mod, start, end);
90d595fe
SR
4324}
4325
8c189ea6
SRRH
4326static int ftrace_module_notify_enter(struct notifier_block *self,
4327 unsigned long val, void *data)
93eb677d
SR
4328{
4329 struct module *mod = data;
4330
8c189ea6 4331 if (val == MODULE_STATE_COMING)
93eb677d
SR
4332 ftrace_init_module(mod, mod->ftrace_callsites,
4333 mod->ftrace_callsites +
4334 mod->num_ftrace_callsites);
8c189ea6
SRRH
4335 return 0;
4336}
4337
4338static int ftrace_module_notify_exit(struct notifier_block *self,
4339 unsigned long val, void *data)
4340{
4341 struct module *mod = data;
4342
4343 if (val == MODULE_STATE_GOING)
e7247a15 4344 ftrace_release_mod(mod);
93eb677d
SR
4345
4346 return 0;
4347}
4348#else
8c189ea6
SRRH
4349static int ftrace_module_notify_enter(struct notifier_block *self,
4350 unsigned long val, void *data)
4351{
4352 return 0;
4353}
4354static int ftrace_module_notify_exit(struct notifier_block *self,
4355 unsigned long val, void *data)
93eb677d
SR
4356{
4357 return 0;
4358}
4359#endif /* CONFIG_MODULES */
4360
8c189ea6
SRRH
4361struct notifier_block ftrace_module_enter_nb = {
4362 .notifier_call = ftrace_module_notify_enter,
c1bf08ac 4363 .priority = INT_MAX, /* Run before anything that can use kprobes */
93eb677d
SR
4364};
4365
8c189ea6
SRRH
4366struct notifier_block ftrace_module_exit_nb = {
4367 .notifier_call = ftrace_module_notify_exit,
4368 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4369};
4370
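/*
 * Module lifecycle in short: on MODULE_STATE_COMING the module's
 * mcount call sites are turned into dyn_ftrace records via
 * ftrace_init_module() -> ftrace_process_locs(); on MODULE_STATE_GOING
 * ftrace_release_mod() frees those records again. The enter notifier
 * runs at INT_MAX priority so the call sites are ready before users
 * such as kprobes see the module.
 */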
68bf21aa
SR
4371void __init ftrace_init(void)
4372{
1dc43cf0
JS
4373 extern unsigned long __start_mcount_loc[];
4374 extern unsigned long __stop_mcount_loc[];
68bf21aa
SR
4375 unsigned long count, addr, flags;
4376 int ret;
4377
4378 /* Keep the ftrace pointer to the stub */
4379 addr = (unsigned long)ftrace_stub;
4380
4381 local_irq_save(flags);
af64a7cb 4382 ret = ftrace_dyn_arch_init(&addr);
68bf21aa 4383 local_irq_restore(flags);
af64a7cb 4384 if (ret)
68bf21aa
SR
4385 goto failed;
4386
4387 count = __stop_mcount_loc - __start_mcount_loc;
c867ccd8
JS
4388 if (!count) {
4389 pr_info("ftrace: No functions to be traced?\n");
68bf21aa 4390 goto failed;
c867ccd8
JS
4391 }
4392
4393 pr_info("ftrace: allocating %ld entries in %ld pages\n",
4394 count, count / ENTRIES_PER_PAGE + 1);
68bf21aa
SR
4395
4396 last_ftrace_enabled = ftrace_enabled = 1;
4397
5cb084bb 4398 ret = ftrace_process_locs(NULL,
31e88909 4399 __start_mcount_loc,
68bf21aa
SR
4400 __stop_mcount_loc);
4401
8c189ea6
SRRH
4402 ret = register_module_notifier(&ftrace_module_enter_nb);
4403 if (ret)
4404 pr_warning("Failed to register trace ftrace module enter notifier\n");
4405
4406 ret = register_module_notifier(&ftrace_module_exit_nb);
24ed0c4b 4407 if (ret)
8c189ea6 4408 pr_warning("Failed to register trace ftrace module exit notifier\n");
93eb677d 4409
2af15d6a
SR
4410 set_ftrace_early_filters();
4411
68bf21aa
SR
4412 return;
4413 failed:
4414 ftrace_disabled = 1;
4415}
68bf21aa 4416
3d083395 4417#else
0b6e4d56 4418
2b499381 4419static struct ftrace_ops global_ops = {
bd69c30b 4420 .func = ftrace_stub,
f04f24fb
MH
4421 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4422 INIT_REGEX_LOCK(global_ops)
bd69c30b
SR
4423};
4424
0b6e4d56
FW
4425static int __init ftrace_nodyn_init(void)
4426{
4427 ftrace_enabled = 1;
4428 return 0;
4429}
6f415672 4430core_initcall(ftrace_nodyn_init);
0b6e4d56 4431
df4fc315
SR
4432static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4433static inline void ftrace_startup_enable(int command) { }
5a45cfe1 4434/* Keep as macros so we do not need to define the commands */
8a56d776
SRRH
4435# define ftrace_startup(ops, command) \
4436 ({ \
4437 int ___ret = __register_ftrace_function(ops); \
4438 if (!___ret) \
4439 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4440 ___ret; \
3b6cfdb1 4441 })
1fcc1553
SRRH
4442# define ftrace_shutdown(ops, command) \
4443 ({ \
4444 int ___ret = __unregister_ftrace_function(ops); \
4445 if (!___ret) \
4446 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4447 ___ret; \
4448 })
8a56d776 4449
c7aafc54
IM
4450# define ftrace_startup_sysctl() do { } while (0)
4451# define ftrace_shutdown_sysctl() do { } while (0)
b848914c
SR
4452
4453static inline int
195a8afc 4454ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
b848914c
SR
4455{
4456 return 1;
4457}
4458
3d083395
SR
4459#endif /* CONFIG_DYNAMIC_FTRACE */
4460
e248491a 4461static void
2f5f6ad9 4462ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 4463 struct ftrace_ops *op, struct pt_regs *regs)
e248491a 4464{
e248491a
JO
4465 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4466 return;
4467
4468 /*
4469 * Some of the ops may be dynamically allocated,
4470 * they must be freed after a synchronize_sched().
4471 */
4472 preempt_disable_notrace();
4473 trace_recursion_set(TRACE_CONTROL_BIT);
b5aa3a47
SRRH
4474
4475 /*
4476 * Control funcs (perf) uses RCU. Only trace if
4477 * RCU is currently active.
4478 */
4479 if (!rcu_is_watching())
4480 goto out;
4481
0a016409 4482 do_for_each_ftrace_op(op, ftrace_control_list) {
395b97a3
SRRH
4483 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4484 !ftrace_function_local_disabled(op) &&
195a8afc 4485 ftrace_ops_test(op, ip, regs))
a1e2e31d 4486 op->func(ip, parent_ip, op, regs);
0a016409 4487 } while_for_each_ftrace_op(op);
b5aa3a47 4488 out:
e248491a
JO
4489 trace_recursion_clear(TRACE_CONTROL_BIT);
4490 preempt_enable_notrace();
4491}
4492
4493static struct ftrace_ops control_ops = {
f04f24fb
MH
4494 .func = ftrace_ops_control_func,
4495 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4496 INIT_REGEX_LOCK(control_ops)
e248491a
JO
4497};
4498
2f5f6ad9
SR
4499static inline void
4500__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 4501 struct ftrace_ops *ignored, struct pt_regs *regs)
b848914c 4502{
cdbe61bf 4503 struct ftrace_ops *op;
edc15caf 4504 int bit;
b848914c 4505
ccf3672d
SR
4506 if (function_trace_stop)
4507 return;
4508
edc15caf
SR
4509 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4510 if (bit < 0)
4511 return;
b1cff0ad 4512
cdbe61bf
SR
4513 /*
4514 * Some of the ops may be dynamically allocated,
4515 * they must be freed after a synchronize_sched().
4516 */
4517 preempt_disable_notrace();
0a016409 4518 do_for_each_ftrace_op(op, ftrace_ops_list) {
195a8afc 4519 if (ftrace_ops_test(op, ip, regs))
a1e2e31d 4520 op->func(ip, parent_ip, op, regs);
0a016409 4521 } while_for_each_ftrace_op(op);
cdbe61bf 4522 preempt_enable_notrace();
edc15caf 4523 trace_clear_recursion(bit);
b848914c
SR
4524}
4525
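/*
 * __ftrace_ops_list_func() is the common dispatcher used when more
 * than one ftrace_ops is registered, or when the single registered ops
 * cannot be called directly: every traced call site lands here, and
 * each registered ops whose filters match the ip gets its ->func()
 * invoked in turn.
 */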
2f5f6ad9
SR
4526/*
4527 * Some archs only support passing ip and parent_ip. Even though
4528 * the list function ignores the op parameter, we do not want any
4529 * C side effects, where a function is called without the caller
4530 * sending a third parameter.
a1e2e31d
SR
4531 * Archs are to support both the regs and ftrace_ops at the same time.
4532 * If they support ftrace_ops, it is assumed they support regs.
4533 * If callbacks want to use regs, they must either check for regs
06aeaaea
MH
4534 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4535 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
a1e2e31d
SR
4536 * An architecture can pass partial regs with ftrace_ops and still
4537 * set ARCH_SUPPORTS_FTRACE_OPS.
2f5f6ad9
SR
4538 */
4539#if ARCH_SUPPORTS_FTRACE_OPS
4540static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 4541 struct ftrace_ops *op, struct pt_regs *regs)
2f5f6ad9 4542{
a1e2e31d 4543 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
2f5f6ad9
SR
4544}
4545#else
4546static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4547{
a1e2e31d 4548 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2f5f6ad9
SR
4549}
4550#endif
4551
e32d8956 4552static void clear_ftrace_swapper(void)
978f3a45
SR
4553{
4554 struct task_struct *p;
e32d8956 4555 int cpu;
978f3a45 4556
e32d8956
SR
4557 get_online_cpus();
4558 for_each_online_cpu(cpu) {
4559 p = idle_task(cpu);
978f3a45 4560 clear_tsk_trace_trace(p);
e32d8956
SR
4561 }
4562 put_online_cpus();
4563}
978f3a45 4564
e32d8956
SR
4565static void set_ftrace_swapper(void)
4566{
4567 struct task_struct *p;
4568 int cpu;
4569
4570 get_online_cpus();
4571 for_each_online_cpu(cpu) {
4572 p = idle_task(cpu);
4573 set_tsk_trace_trace(p);
4574 }
4575 put_online_cpus();
978f3a45
SR
4576}
4577
e32d8956
SR
4578static void clear_ftrace_pid(struct pid *pid)
4579{
4580 struct task_struct *p;
4581
229c4ef8 4582 rcu_read_lock();
e32d8956
SR
4583 do_each_pid_task(pid, PIDTYPE_PID, p) {
4584 clear_tsk_trace_trace(p);
4585 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
4586 rcu_read_unlock();
4587
e32d8956
SR
4588 put_pid(pid);
4589}
4590
4591static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
4592{
4593 struct task_struct *p;
4594
229c4ef8 4595 rcu_read_lock();
978f3a45
SR
4596 do_each_pid_task(pid, PIDTYPE_PID, p) {
4597 set_tsk_trace_trace(p);
4598 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 4599 rcu_read_unlock();
978f3a45
SR
4600}
4601
756d17ee 4602static void clear_ftrace_pid_task(struct pid *pid)
e32d8956 4603{
756d17ee 4604 if (pid == ftrace_swapper_pid)
e32d8956
SR
4605 clear_ftrace_swapper();
4606 else
756d17ee 4607 clear_ftrace_pid(pid);
e32d8956
SR
4608}
4609
4610static void set_ftrace_pid_task(struct pid *pid)
4611{
4612 if (pid == ftrace_swapper_pid)
4613 set_ftrace_swapper();
4614 else
4615 set_ftrace_pid(pid);
4616}
4617
756d17ee 4618static int ftrace_pid_add(int p)
df4fc315 4619{
978f3a45 4620 struct pid *pid;
756d17ee 4621 struct ftrace_pid *fpid;
4622 int ret = -EINVAL;
df4fc315 4623
756d17ee 4624 mutex_lock(&ftrace_lock);
df4fc315 4625
756d17ee 4626 if (!p)
4627 pid = ftrace_swapper_pid;
4628 else
4629 pid = find_get_pid(p);
df4fc315 4630
756d17ee 4631 if (!pid)
4632 goto out;
df4fc315 4633
756d17ee 4634 ret = 0;
df4fc315 4635
756d17ee 4636 list_for_each_entry(fpid, &ftrace_pids, list)
4637 if (fpid->pid == pid)
4638 goto out_put;
978f3a45 4639
756d17ee 4640 ret = -ENOMEM;
df4fc315 4641
756d17ee 4642 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4643 if (!fpid)
4644 goto out_put;
df4fc315 4645
756d17ee 4646 list_add(&fpid->list, &ftrace_pids);
4647 fpid->pid = pid;
0ef8cde5 4648
756d17ee 4649 set_ftrace_pid_task(pid);
978f3a45 4650
756d17ee 4651 ftrace_update_pid_func();
4652 ftrace_startup_enable(0);
4653
4654 mutex_unlock(&ftrace_lock);
4655 return 0;
4656
4657out_put:
4658 if (pid != ftrace_swapper_pid)
4659 put_pid(pid);
978f3a45 4660
756d17ee 4661out:
4662 mutex_unlock(&ftrace_lock);
4663 return ret;
4664}
4665
4666static void ftrace_pid_reset(void)
4667{
4668 struct ftrace_pid *fpid, *safe;
978f3a45 4669
756d17ee 4670 mutex_lock(&ftrace_lock);
4671 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4672 struct pid *pid = fpid->pid;
4673
4674 clear_ftrace_pid_task(pid);
4675
4676 list_del(&fpid->list);
4677 kfree(fpid);
df4fc315
SR
4678 }
4679
df4fc315
SR
4680 ftrace_update_pid_func();
4681 ftrace_startup_enable(0);
4682
e6ea44e9 4683 mutex_unlock(&ftrace_lock);
756d17ee 4684}
df4fc315 4685
756d17ee 4686static void *fpid_start(struct seq_file *m, loff_t *pos)
4687{
4688 mutex_lock(&ftrace_lock);
4689
4690 if (list_empty(&ftrace_pids) && (!*pos))
4691 return (void *) 1;
4692
4693 return seq_list_start(&ftrace_pids, *pos);
4694}
4695
4696static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4697{
4698 if (v == (void *)1)
4699 return NULL;
4700
4701 return seq_list_next(v, &ftrace_pids, pos);
4702}
4703
4704static void fpid_stop(struct seq_file *m, void *p)
4705{
4706 mutex_unlock(&ftrace_lock);
4707}
4708
4709static int fpid_show(struct seq_file *m, void *v)
4710{
4711 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4712
4713 if (v == (void *)1) {
4714 seq_printf(m, "no pid\n");
4715 return 0;
4716 }
4717
4718 if (fpid->pid == ftrace_swapper_pid)
4719 seq_printf(m, "swapper tasks\n");
4720 else
4721 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4722
4723 return 0;
4724}
4725
4726static const struct seq_operations ftrace_pid_sops = {
4727 .start = fpid_start,
4728 .next = fpid_next,
4729 .stop = fpid_stop,
4730 .show = fpid_show,
4731};
4732
4733static int
4734ftrace_pid_open(struct inode *inode, struct file *file)
4735{
4736 int ret = 0;
4737
4738 if ((file->f_mode & FMODE_WRITE) &&
4739 (file->f_flags & O_TRUNC))
4740 ftrace_pid_reset();
4741
4742 if (file->f_mode & FMODE_READ)
4743 ret = seq_open(file, &ftrace_pid_sops);
4744
4745 return ret;
4746}
4747
df4fc315
SR
4748static ssize_t
4749ftrace_pid_write(struct file *filp, const char __user *ubuf,
4750 size_t cnt, loff_t *ppos)
4751{
457dc928 4752 char buf[64], *tmp;
df4fc315
SR
4753 long val;
4754 int ret;
4755
4756 if (cnt >= sizeof(buf))
4757 return -EINVAL;
4758
4759 if (copy_from_user(&buf, ubuf, cnt))
4760 return -EFAULT;
4761
4762 buf[cnt] = 0;
4763
756d17ee 4764 /*
4765 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4766 * to clean the filter quietly.
4767 */
457dc928
IM
4768 tmp = strstrip(buf);
4769 if (strlen(tmp) == 0)
756d17ee 4770 return 1;
4771
bcd83ea6 4772 ret = kstrtol(tmp, 10, &val);
df4fc315
SR
4773 if (ret < 0)
4774 return ret;
4775
756d17ee 4776 ret = ftrace_pid_add(val);
df4fc315 4777
756d17ee 4778 return ret ? ret : cnt;
4779}
df4fc315 4780
756d17ee 4781static int
4782ftrace_pid_release(struct inode *inode, struct file *file)
4783{
4784 if (file->f_mode & FMODE_READ)
4785 seq_release(inode, file);
df4fc315 4786
756d17ee 4787 return 0;
df4fc315
SR
4788}
4789
5e2336a0 4790static const struct file_operations ftrace_pid_fops = {
756d17ee 4791 .open = ftrace_pid_open,
4792 .write = ftrace_pid_write,
4793 .read = seq_read,
098c879e 4794 .llseek = tracing_lseek,
756d17ee 4795 .release = ftrace_pid_release,
df4fc315
SR
4796};
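/*
 * Usage of the set_ftrace_pid file created below: writing a PID adds
 * that task to the traced set, writing "0" selects the idle (swapper)
 * tasks, and opening the file with O_TRUNC (as a plain shell '>'
 * redirect does) clears the whole list so every task is traced again.
 */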
4797
4798static __init int ftrace_init_debugfs(void)
4799{
4800 struct dentry *d_tracer;
df4fc315
SR
4801
4802 d_tracer = tracing_init_dentry();
4803 if (!d_tracer)
4804 return 0;
4805
4806 ftrace_init_dyn_debugfs(d_tracer);
4807
5452af66
FW
4808 trace_create_file("set_ftrace_pid", 0644, d_tracer,
4809 NULL, &ftrace_pid_fops);
493762fc
SR
4810
4811 ftrace_profile_debugfs(d_tracer);
4812
df4fc315
SR
4813 return 0;
4814}
df4fc315
SR
4815fs_initcall(ftrace_init_debugfs);
4816
a2bb6a3d 4817/**
81adbdc0 4818 * ftrace_kill - kill ftrace
a2bb6a3d
SR
4819 *
4820 * This function should be used by panic code. It stops ftrace
4821 * but in a not so nice way: once called, ftrace stays disabled.
4822 * To simply stop tracing from a non-atomic section, unregister the ftrace_ops instead.
4823 */
81adbdc0 4824void ftrace_kill(void)
a2bb6a3d
SR
4825{
4826 ftrace_disabled = 1;
4827 ftrace_enabled = 0;
a2bb6a3d
SR
4828 clear_ftrace_function();
4829}
4830
e0a413f6
SR
4831/**
4832 * ftrace_is_dead - Test if ftrace is dead or not.
4833 */
4834int ftrace_is_dead(void)
4835{
4836 return ftrace_disabled;
4837}
4838
16444a8a 4839/**
3d083395
SR
4840 * register_ftrace_function - register a function for profiling
4841 * @ops: ops structure that holds the function for profiling.
16444a8a 4842 *
3d083395
SR
4843 * Register a function to be called by all functions in the
4844 * kernel.
4845 *
4846 * Note: @ops->func and all the functions it calls must be labeled
4847 * with "notrace", otherwise it will go into a
4848 * recursive loop.
16444a8a 4849 */
3d083395 4850int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 4851{
45a4a237 4852 int ret = -1;
4eebcc81 4853
f04f24fb
MH
4854 ftrace_ops_init(ops);
4855
e6ea44e9 4856 mutex_lock(&ftrace_lock);
e7d3737e 4857
8a56d776 4858 ret = ftrace_startup(ops, 0);
b848914c 4859
e6ea44e9 4860 mutex_unlock(&ftrace_lock);
8d240dd8 4861
b0fc494f 4862 return ret;
3d083395 4863}
cdbe61bf 4864EXPORT_SYMBOL_GPL(register_ftrace_function);
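/*
 * Minimal sketch of using the API above (assumption: this lives in a
 * separate module, not in this file). The callback signature matches
 * the one used throughout this file, and the callback plus everything
 * it calls must be notrace, as the kerneldoc says.
 */
static void notrace example_trace_callback(unsigned long ip,
					   unsigned long parent_ip,
					   struct ftrace_ops *op,
					   struct pt_regs *regs)
{
	/* Keep this minimal; it runs on every traced function entry. */
}

static struct ftrace_ops example_trace_ops = {
	.func	= example_trace_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/*
 * From module init/exit (error handling elided):
 *
 *	register_ftrace_function(&example_trace_ops);
 *	...
 *	unregister_ftrace_function(&example_trace_ops);
 */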
3d083395
SR
4865
4866/**
32632920 4867 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
4868 * @ops: ops structure that holds the function to unregister
4869 *
4870 * Unregister a function that was added to be called by ftrace profiling.
4871 */
4872int unregister_ftrace_function(struct ftrace_ops *ops)
4873{
4874 int ret;
4875
e6ea44e9 4876 mutex_lock(&ftrace_lock);
8a56d776 4877 ret = ftrace_shutdown(ops, 0);
e6ea44e9 4878 mutex_unlock(&ftrace_lock);
b0fc494f
SR
4879
4880 return ret;
4881}
cdbe61bf 4882EXPORT_SYMBOL_GPL(unregister_ftrace_function);
b0fc494f 4883
e309b41d 4884int
b0fc494f 4885ftrace_enable_sysctl(struct ctl_table *table, int write,
8d65af78 4886 void __user *buffer, size_t *lenp,
b0fc494f
SR
4887 loff_t *ppos)
4888{
45a4a237 4889 int ret = -ENODEV;
4eebcc81 4890
e6ea44e9 4891 mutex_lock(&ftrace_lock);
b0fc494f 4892
45a4a237
SR
4893 if (unlikely(ftrace_disabled))
4894 goto out;
4895
4896 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 4897
a32c7765 4898 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
4899 goto out;
4900
a32c7765 4901 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f
SR
4902
4903 if (ftrace_enabled) {
4904
4905 ftrace_startup_sysctl();
4906
4907 /* we are starting ftrace again */
5000c418
JK
4908 if (ftrace_ops_list != &ftrace_list_end)
4909 update_ftrace_function();
b0fc494f
SR
4910
4911 } else {
4912 /* stopping ftrace calls (just send to ftrace_stub) */
4913 ftrace_trace_function = ftrace_stub;
4914
4915 ftrace_shutdown_sysctl();
4916 }
4917
4918 out:
e6ea44e9 4919 mutex_unlock(&ftrace_lock);
3d083395 4920 return ret;
16444a8a 4921}
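/*
 * This handler backs the ftrace_enabled sysctl (typically exposed as
 * /proc/sys/kernel/ftrace_enabled): clearing it points the trace
 * function at ftrace_stub, setting it again re-installs whatever ops
 * are on ftrace_ops_list.
 */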
f17845e5 4922
fb52607a 4923#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 4924
597af815 4925static int ftrace_graph_active;
4a2b8dda 4926static struct notifier_block ftrace_suspend_notifier;
e7d3737e 4927
e49dc19c
SR
4928int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4929{
4930 return 0;
4931}
4932
287b6e68
FW
4933/* The callbacks that hook a function */
4934trace_func_graph_ret_t ftrace_graph_return =
4935 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 4936trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
23a8e844 4937static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
4938
4939/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4940static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4941{
4942 int i;
4943 int ret = 0;
4944 unsigned long flags;
4945 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4946 struct task_struct *g, *t;
4947
4948 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4949 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4950 * sizeof(struct ftrace_ret_stack),
4951 GFP_KERNEL);
4952 if (!ret_stack_list[i]) {
4953 start = 0;
4954 end = i;
4955 ret = -ENOMEM;
4956 goto free;
4957 }
4958 }
4959
4960 read_lock_irqsave(&tasklist_lock, flags);
4961 do_each_thread(g, t) {
4962 if (start == end) {
4963 ret = -EAGAIN;
4964 goto unlock;
4965 }
4966
4967 if (t->ret_stack == NULL) {
380c4b14 4968 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 4969 atomic_set(&t->trace_overrun, 0);
26c01624
SR
4970 t->curr_ret_stack = -1;
4971 /* Make sure the tasks see the -1 first: */
4972 smp_wmb();
4973 t->ret_stack = ret_stack_list[start++];
f201ae23
FW
4974 }
4975 } while_each_thread(g, t);
4976
4977unlock:
4978 read_unlock_irqrestore(&tasklist_lock, flags);
4979free:
4980 for (i = start; i < end; i++)
4981 kfree(ret_stack_list[i]);
4982 return ret;
4983}
4984
8aef2d28 4985static void
38516ab5
SR
4986ftrace_graph_probe_sched_switch(void *ignore,
4987 struct task_struct *prev, struct task_struct *next)
8aef2d28
SR
4988{
4989 unsigned long long timestamp;
4990 int index;
4991
be6f164a
SR
4992 /*
4993 * Does the user want to count the time a function was asleep?
4994 * If so, do not update the time stamps.
4995 */
4996 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4997 return;
4998
8aef2d28
SR
4999 timestamp = trace_clock_local();
5000
5001 prev->ftrace_timestamp = timestamp;
5002
5003 /* only process tasks that we timestamped */
5004 if (!next->ftrace_timestamp)
5005 return;
5006
5007 /*
5008 * Update all the counters in next to make up for the
5009 * time next was sleeping.
5010 */
5011 timestamp -= next->ftrace_timestamp;
5012
5013 for (index = next->curr_ret_stack; index >= 0; index--)
5014 next->ret_stack[index].calltime += timestamp;
5015}
5016
f201ae23 5017/* Allocate a return stack for each task */
fb52607a 5018static int start_graph_tracing(void)
f201ae23
FW
5019{
5020 struct ftrace_ret_stack **ret_stack_list;
5b058bcd 5021 int ret, cpu;
f201ae23
FW
5022
5023 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5024 sizeof(struct ftrace_ret_stack *),
5025 GFP_KERNEL);
5026
5027 if (!ret_stack_list)
5028 return -ENOMEM;
5029
5b058bcd 5030 /* The cpu_boot init_task->ret_stack will never be freed */
179c498a
SR
5031 for_each_online_cpu(cpu) {
5032 if (!idle_task(cpu)->ret_stack)
868baf07 5033 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
179c498a 5034 }
5b058bcd 5035
f201ae23
FW
5036 do {
5037 ret = alloc_retstack_tasklist(ret_stack_list);
5038 } while (ret == -EAGAIN);
5039
8aef2d28 5040 if (!ret) {
38516ab5 5041 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
8aef2d28
SR
5042 if (ret)
5043 pr_info("ftrace_graph: Couldn't activate tracepoint"
5044 " probe to kernel_sched_switch\n");
5045 }
5046
f201ae23
FW
5047 kfree(ret_stack_list);
5048 return ret;
5049}
5050
4a2b8dda
FW
5051/*
5052 * Hibernation protection.
5053 * The state of the current task is too unstable during
5054 * suspend/restore to disk. We want to protect against that.
5055 */
5056static int
5057ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5058 void *unused)
5059{
5060 switch (state) {
5061 case PM_HIBERNATION_PREPARE:
5062 pause_graph_tracing();
5063 break;
5064
5065 case PM_POST_HIBERNATION:
5066 unpause_graph_tracing();
5067 break;
5068 }
5069 return NOTIFY_DONE;
5070}
5071
8a56d776
SRRH
5072/* Just a place holder for function graph */
5073static struct ftrace_ops fgraph_ops __read_mostly = {
5074 .func = ftrace_stub,
5075 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5076 FTRACE_OPS_FL_RECURSION_SAFE,
5077};
5078
23a8e844
SRRH
5079static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5080{
5081 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5082 return 0;
5083 return __ftrace_graph_entry(trace);
5084}
5085
5086/*
5087 * The function graph tracer should only trace the functions defined
5088 * by set_ftrace_filter and set_ftrace_notrace. If another function
5089 * tracer ops is registered, the graph tracer requires testing the
5090 * function against the global ops, and must not simply trace any
5091 * function that any other ftrace_ops has registered for.
5092 */
5093static void update_function_graph_func(void)
5094{
5095 if (ftrace_ops_list == &ftrace_list_end ||
5096 (ftrace_ops_list == &global_ops &&
5097 global_ops.next == &ftrace_list_end))
5098 ftrace_graph_entry = __ftrace_graph_entry;
5099 else
5100 ftrace_graph_entry = ftrace_graph_entry_test;
5101}
5102
287b6e68
FW
5103int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5104 trace_func_graph_ent_t entryfunc)
15e6cb36 5105{
e7d3737e
FW
5106 int ret = 0;
5107
e6ea44e9 5108 mutex_lock(&ftrace_lock);
e7d3737e 5109
05ce5818 5110 /* we currently allow only one tracer registered at a time */
597af815 5111 if (ftrace_graph_active) {
05ce5818
SR
5112 ret = -EBUSY;
5113 goto out;
5114 }
5115
4a2b8dda
FW
5116 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
5117 register_pm_notifier(&ftrace_suspend_notifier);
5118
597af815 5119 ftrace_graph_active++;
fb52607a 5120 ret = start_graph_tracing();
f201ae23 5121 if (ret) {
597af815 5122 ftrace_graph_active--;
f201ae23
FW
5123 goto out;
5124 }
e53a6319 5125
287b6e68 5126 ftrace_graph_return = retfunc;
23a8e844
SRRH
5127
5128 /*
5129 * Point the indirect __ftrace_graph_entry at the entryfunc and
5130 * make ftrace_graph_entry go through the entry_test wrapper first.
5131 * Then let update_function_graph_func() decide whether the
5132 * entryfunc can be called directly or must stay behind the test.
5133 */
5134 __ftrace_graph_entry = entryfunc;
5135 ftrace_graph_entry = ftrace_graph_entry_test;
5136 update_function_graph_func();
e53a6319 5137
8a56d776 5138 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
e7d3737e
FW
5139
5140out:
e6ea44e9 5141 mutex_unlock(&ftrace_lock);
e7d3737e 5142 return ret;
15e6cb36
FW
5143}
5144
fb52607a 5145void unregister_ftrace_graph(void)
15e6cb36 5146{
e6ea44e9 5147 mutex_lock(&ftrace_lock);
e7d3737e 5148
597af815 5149 if (unlikely(!ftrace_graph_active))
2aad1b76
SR
5150 goto out;
5151
597af815 5152 ftrace_graph_active--;
287b6e68 5153 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 5154 ftrace_graph_entry = ftrace_graph_entry_stub;
23a8e844 5155 __ftrace_graph_entry = ftrace_graph_entry_stub;
8a56d776 5156 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
4a2b8dda 5157 unregister_pm_notifier(&ftrace_suspend_notifier);
38516ab5 5158 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
e7d3737e 5159
2aad1b76 5160 out:
e6ea44e9 5161 mutex_unlock(&ftrace_lock);
15e6cb36 5162}
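/*
 * Minimal sketch of registering graph callbacks through the API above
 * (assumption: module code, not part of this file). The entry callback
 * returns nonzero to have the return probe installed for that call,
 * zero to skip it.
 */
static int notrace example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;		/* trace this function's return as well */
}

static void notrace example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->func, trace->calltime and trace->rettime are valid here */
}

/*
 *	register_ftrace_graph(example_graph_return, example_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */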
f201ae23 5163
868baf07
SR
5164static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5165
5166static void
5167graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5168{
5169 atomic_set(&t->tracing_graph_pause, 0);
5170 atomic_set(&t->trace_overrun, 0);
5171 t->ftrace_timestamp = 0;
25985edc 5172 /* make curr_ret_stack visible before we add the ret_stack */
868baf07
SR
5173 smp_wmb();
5174 t->ret_stack = ret_stack;
5175}
5176
5177/*
5178 * Allocate a return stack for the idle task. May be the first
5179 * time through, or it may be done by CPU hotplug online.
5180 */
5181void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5182{
5183 t->curr_ret_stack = -1;
5184 /*
5185 * The idle task has no parent, it either has its own
5186 * stack or no stack at all.
5187 */
5188 if (t->ret_stack)
5189 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5190
5191 if (ftrace_graph_active) {
5192 struct ftrace_ret_stack *ret_stack;
5193
5194 ret_stack = per_cpu(idle_ret_stack, cpu);
5195 if (!ret_stack) {
5196 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5197 * sizeof(struct ftrace_ret_stack),
5198 GFP_KERNEL);
5199 if (!ret_stack)
5200 return;
5201 per_cpu(idle_ret_stack, cpu) = ret_stack;
5202 }
5203 graph_init_task(t, ret_stack);
5204 }
5205}
5206
f201ae23 5207/* Allocate a return stack for newly created task */
fb52607a 5208void ftrace_graph_init_task(struct task_struct *t)
f201ae23 5209{
84047e36
SR
5210 /* Make sure we do not use the parent ret_stack */
5211 t->ret_stack = NULL;
ea14eb71 5212 t->curr_ret_stack = -1;
84047e36 5213
597af815 5214 if (ftrace_graph_active) {
82310a32
SR
5215 struct ftrace_ret_stack *ret_stack;
5216
5217 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
f201ae23
FW
5218 * sizeof(struct ftrace_ret_stack),
5219 GFP_KERNEL);
82310a32 5220 if (!ret_stack)
f201ae23 5221 return;
868baf07 5222 graph_init_task(t, ret_stack);
84047e36 5223 }
f201ae23
FW
5224}
5225
fb52607a 5226void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 5227{
eae849ca
FW
5228 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5229
f201ae23 5230 t->ret_stack = NULL;
eae849ca
FW
5231 /* NULL must become visible to IRQs before we free it: */
5232 barrier();
5233
5234 kfree(ret_stack);
f201ae23 5235}
14a866c5
SR
5236
5237void ftrace_graph_stop(void)
5238{
5239 ftrace_stop();
5240}
15e6cb36 5241#endif