kernel/trace/ftrace.c (Linux 3.18-rc1)
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
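
/*
 * Illustrative use (the pattern relied on later in this file): because the
 * macros evaluate to the condition's value, a caller can shut ftrace down
 * and bail out in a single test:
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;
 */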

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this
 * list are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
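
/*
 * Illustrative use, the pattern used throughout this file:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			;	// inspect or update 'op' here
 *	} while_for_each_ftrace_op(op);
 */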

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
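
/*
 * Rough numbers for illustration, assuming x86-64 with 4K pages and
 * CONFIG_FUNCTION_GRAPH_TRACER: struct ftrace_profile is 48 bytes and the
 * page header is 16 bytes, so PROFILES_PER_PAGE works out to about
 * (4096 - 16) / 48 = 85 records per page.
 */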

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}
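
	/*
	 * Worked example of the formula above: two hits of 100ns and 300ns
	 * give time = 400 and time_squared = 100000, so
	 * n * \Sum x^2 - (\Sum x)^2 = 2 * 100000 - 160000 = 40000, and
	 * 40000 / (2 * 1) = 20000 ns^2, which is exactly the sample
	 * variance of {100, 300}. The divisions by 1000 only scale it
	 * for printing.
	 */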

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
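
	/*
	 * Ballpark for illustration, assuming roughly 85 records per page
	 * as estimated above: 20000 functions need
	 * DIV_ROUND_UP(20000, 85) = 236 pages, i.e. a bit under 1MB of
	 * profile records per CPU.
	 */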
	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}
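
	/*
	 * Illustration: when A calls B, B's calltime is accumulated into
	 * A's ret_stack subtime above, so A is later charged only for its
	 * own body and not for the time spent inside B.
	 */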
	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED,
};

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
1256static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1257{
1258 struct ftrace_hash *hash;
1259 int size;
1260
1261 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1262 if (!hash)
1263 return NULL;
1264
1265 size = 1 << size_bits;
47b0edcb 1266 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
33dc9b12
SR
1267
1268 if (!hash->buckets) {
1269 kfree(hash);
1270 return NULL;
1271 }
1272
1273 hash->size_bits = size_bits;
1274
1275 return hash;
1276}
1277
1278static struct ftrace_hash *
1279alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1280{
1281 struct ftrace_func_entry *entry;
1282 struct ftrace_hash *new_hash;
33dc9b12
SR
1283 int size;
1284 int ret;
1285 int i;
1286
1287 new_hash = alloc_ftrace_hash(size_bits);
1288 if (!new_hash)
1289 return NULL;
1290
1291 /* Empty hash? */
06a51d93 1292 if (ftrace_hash_empty(hash))
33dc9b12
SR
1293 return new_hash;
1294
1295 size = 1 << hash->size_bits;
1296 for (i = 0; i < size; i++) {
b67bfe0d 1297 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
33dc9b12
SR
1298 ret = add_hash_entry(new_hash, entry->ip);
1299 if (ret < 0)
1300 goto free_hash;
1301 }
1302 }
1303
1304 FTRACE_WARN_ON(new_hash->count != hash->count);
1305
1306 return new_hash;
1307
1308 free_hash:
1309 free_ftrace_hash(new_hash);
1310 return NULL;
1311}
1312

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;
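
	/*
	 * For example, src->count = 100: 100 / 2 = 50 takes six shifts to
	 * reach zero, so bits = 6 and the new hash gets 64 buckets.
	 */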
	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}
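
/*
 * Example of the semantics above: with filter_hash = {kfree} and an empty
 * notrace_hash, only kfree() matches; with an empty filter_hash and
 * notrace_hash = {kfree}, every traced function except kfree() matches.
 */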

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
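
/*
 * Illustrative use ('target_ip' is a hypothetical variable):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 */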

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
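
/*
 * Note on the comparison above: a single-address lookup sets both key.ip
 * and key.flags to the same ip, so a record matches exactly when
 * ip >= rec->ip && ip < rec->ip + MCOUNT_INSN_SIZE.
 */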

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}
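
/*
 * Illustrative caller (a sketch, not taken from this file): a probing
 * layer can reject an address that ftrace owns with
 *
 *	if (ftrace_location(addr))
 *		return -EBUSY;	// 'addr' is an mcount site patched by ftrace
 */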

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec need regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared, as TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it is disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 * return UPDATE_MAKE_CALL.
		 * Otherwise,
		 * return UPDATE_MODIFY_CALL to tell the caller to convert
		 * from the save regs, to a non-save regs function or
		 * vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;

		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there are no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/* Just disable the record (keep REGS state) */
			rec->flags &= ~FTRACE_FL_ENABLED;
	}

	return FTRACE_UPDATE_MAKE_NOP;
}
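
/*
 * Summary of the possible results of ftrace_check_record():
 *   FTRACE_UPDATE_IGNORE      - the record is already in the desired state
 *   FTRACE_UPDATE_MAKE_CALL   - a nop site becomes a call
 *   FTRACE_UPDATE_MODIFY_CALL - a call changes target (regs or trampoline)
 *   FTRACE_UPDATE_MAKE_NOP    - a call site becomes a nop
 */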
1859/**
1860 * ftrace_update_record, set a record that now is tracing or not
1861 * @rec: the record to update
1862 * @enable: set to 1 if the record is tracing, zero to force disable
1863 *
1864 * The records that represent all functions that can be traced need
1865 * to be updated when tracing has been enabled.
1866 */
1867int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1868{
1869 return ftrace_check_record(rec, enable, 1);
1870}
1871
1872/**
1873 * ftrace_test_record, check if the record has been enabled or not
1874 * @rec: the record to test
1875 * @enable: set to 1 to check if enabled, 0 if it is disabled
1876 *
1877 * The arch code may need to test if a record is already set to
1878 * tracing to determine how to modify the function code that it
1879 * represents.
1880 */
1881int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1882{
1883 return ftrace_check_record(rec, enable, 0);
1884}
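/*
 * Example (illustrative sketch, not from the original file): arch code
 * that batches its text modifications, as x86's breakpoint scheme does,
 * typically uses ftrace_test_record() to see what a site will become
 * without touching rec->flags, and calls ftrace_update_record() only when
 * the change is committed.  The example_plan_*() helpers are hypothetical.
 */
#if 0
static int example_plan_update(struct dyn_ftrace *rec, int enable)
{
	switch (ftrace_test_record(rec, enable)) {
	case FTRACE_UPDATE_IGNORE:
		return 0;	/* already in the desired state */
	case FTRACE_UPDATE_MAKE_CALL:
		return example_plan_call(rec, ftrace_get_addr_new(rec));
	case FTRACE_UPDATE_MAKE_NOP:
		return example_plan_nop(rec, ftrace_get_addr_curr(rec));
	case FTRACE_UPDATE_MODIFY_CALL:
		return example_plan_modify(rec, ftrace_get_addr_curr(rec),
					   ftrace_get_addr_new(rec));
	}
	return -EINVAL;
}
#endif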
1885
5fecaa04
SRRH
1886static struct ftrace_ops *
1887ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
1888{
1889 struct ftrace_ops *op;
fef5aeee 1890 unsigned long ip = rec->ip;
5fecaa04
SRRH
1891
1892 do_for_each_ftrace_op(op, ftrace_ops_list) {
1893
1894 if (!op->trampoline)
1895 continue;
1896
fef5aeee 1897 if (hash_contains_ip(ip, op->func_hash))
5fecaa04
SRRH
1898 return op;
1899 } while_for_each_ftrace_op(op);
1900
1901 return NULL;
1902}
1903
79922b80
SRRH
1904static struct ftrace_ops *
1905ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1906{
1907 struct ftrace_ops *op;
fef5aeee 1908 unsigned long ip = rec->ip;
79922b80 1909
fef5aeee
SRRH
1910 /*
1911 * Need to check removed ops first.
1912 * If they are being removed, and this rec has a tramp,
1913 * and this rec is in the ops list, then it would be the
1914 * one with the tramp.
1915 */
1916 if (removed_ops) {
1917 if (hash_contains_ip(ip, &removed_ops->old_hash))
79922b80
SRRH
1918 return removed_ops;
1919 }
1920
fef5aeee
SRRH
1921 /*
1922 * Need to find the current trampoline for a rec.
1923 * Now, a trampoline is only attached to a rec if there
1924 * was a single 'ops' attached to it. But this can be called
1925 * when we are adding another op to the rec or removing the
1926 * current one. Thus, if the op is being added, we can
1927 * ignore it because it hasn't attached itself to the rec
1928 * yet. That means we just need to find the op that has a
 1929 * trampoline and is not being added.
1930 */
79922b80 1931 do_for_each_ftrace_op(op, ftrace_ops_list) {
fef5aeee
SRRH
1932
1933 if (!op->trampoline)
1934 continue;
1935
1936 /*
1937 * If the ops is being added, it hasn't gotten to
1938 * the point to be removed from this tree yet.
1939 */
1940 if (op->flags & FTRACE_OPS_FL_ADDING)
79922b80
SRRH
1941 continue;
1942
fef5aeee
SRRH
1943 /*
1944 * If the ops is not being added and has a trampoline,
1945 * then it must be the one that we want!
1946 */
1947 if (hash_contains_ip(ip, op->func_hash))
1948 return op;
1949
1950 /* If the ops is being modified, it may be in the old hash. */
1951 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
1952 hash_contains_ip(ip, &op->old_hash))
79922b80
SRRH
1953 return op;
1954
1955 } while_for_each_ftrace_op(op);
1956
1957 return NULL;
1958}
1959
1960static struct ftrace_ops *
1961ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
1962{
1963 struct ftrace_ops *op;
fef5aeee 1964 unsigned long ip = rec->ip;
79922b80
SRRH
1965
1966 do_for_each_ftrace_op(op, ftrace_ops_list) {
 1967 /* return the first ops whose hash contains this ip */
fef5aeee 1968 if (hash_contains_ip(ip, op->func_hash))
79922b80
SRRH
1969 return op;
1970 } while_for_each_ftrace_op(op);
1971
1972 return NULL;
1973}
1974
7413af1f
SRRH
1975/**
1976 * ftrace_get_addr_new - Get the call address to set to
1977 * @rec: The ftrace record descriptor
1978 *
 1979 * If the record has FTRACE_FL_REGS set, that means that it
 1980 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 1981 * is not set, then it wants to convert to the normal callback.
1982 *
1983 * Returns the address of the trampoline to set to
1984 */
1985unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
1986{
79922b80
SRRH
1987 struct ftrace_ops *ops;
1988
1989 /* Trampolines take precedence over regs */
1990 if (rec->flags & FTRACE_FL_TRAMP) {
1991 ops = ftrace_find_tramp_ops_new(rec);
1992 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
bce0b6c5
SRRH
1993 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
1994 (void *)rec->ip, (void *)rec->ip, rec->flags);
79922b80
SRRH
1995 /* Ftrace is shutting down, return anything */
1996 return (unsigned long)FTRACE_ADDR;
1997 }
1998 return ops->trampoline;
1999 }
2000
7413af1f
SRRH
2001 if (rec->flags & FTRACE_FL_REGS)
2002 return (unsigned long)FTRACE_REGS_ADDR;
2003 else
2004 return (unsigned long)FTRACE_ADDR;
2005}
2006
2007/**
2008 * ftrace_get_addr_curr - Get the call address that is already there
2009 * @rec: The ftrace record descriptor
2010 *
2011 * The FTRACE_FL_REGS_EN is set when the record already points to
2012 * a function that saves all the regs. Basically the '_EN' version
2013 * represents the current state of the function.
2014 *
2015 * Returns the address of the trampoline that is currently being called
2016 */
2017unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2018{
79922b80
SRRH
2019 struct ftrace_ops *ops;
2020
2021 /* Trampolines take precedence over regs */
2022 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2023 ops = ftrace_find_tramp_ops_curr(rec);
2024 if (FTRACE_WARN_ON(!ops)) {
2025 pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2026 (void *)rec->ip, (void *)rec->ip);
2027 /* Ftrace is shutting down, return anything */
2028 return (unsigned long)FTRACE_ADDR;
2029 }
2030 return ops->trampoline;
2031 }
2032
7413af1f
SRRH
2033 if (rec->flags & FTRACE_FL_REGS_EN)
2034 return (unsigned long)FTRACE_REGS_ADDR;
2035 else
2036 return (unsigned long)FTRACE_ADDR;
2037}
2038
c88fd863
SR
2039static int
2040__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2041{
08f6fba5 2042 unsigned long ftrace_old_addr;
c88fd863
SR
2043 unsigned long ftrace_addr;
2044 int ret;
2045
7c0868e0 2046 ftrace_addr = ftrace_get_addr_new(rec);
c88fd863 2047
7c0868e0
SRRH
2048 /* This needs to be done before we call ftrace_update_record */
2049 ftrace_old_addr = ftrace_get_addr_curr(rec);
2050
2051 ret = ftrace_update_record(rec, enable);
08f6fba5 2052
c88fd863
SR
2053 switch (ret) {
2054 case FTRACE_UPDATE_IGNORE:
2055 return 0;
2056
2057 case FTRACE_UPDATE_MAKE_CALL:
64fbcd16 2058 return ftrace_make_call(rec, ftrace_addr);
c88fd863
SR
2059
2060 case FTRACE_UPDATE_MAKE_NOP:
39b5552c 2061 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
08f6fba5 2062
08f6fba5 2063 case FTRACE_UPDATE_MODIFY_CALL:
08f6fba5 2064 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
5072c59f
SR
2065 }
2066
c88fd863 2067 return -1; /* unknown ftrace bug */
5072c59f
SR
2068}
2069
e4f5d544 2070void __weak ftrace_replace_code(int enable)
3c1720f0 2071{
3c1720f0
SR
2072 struct dyn_ftrace *rec;
2073 struct ftrace_page *pg;
6a24a244 2074 int failed;
3c1720f0 2075
45a4a237
SR
2076 if (unlikely(ftrace_disabled))
2077 return;
2078
265c831c 2079 do_for_each_ftrace_rec(pg, rec) {
e4f5d544 2080 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 2081 if (failed) {
3279ba37
SR
2082 ftrace_bug(failed, rec->ip);
2083 /* Stop processing */
2084 return;
3c1720f0 2085 }
265c831c 2086 } while_for_each_ftrace_rec();
3c1720f0
SR
2087}
2088
c88fd863
SR
2089struct ftrace_rec_iter {
2090 struct ftrace_page *pg;
2091 int index;
2092};
2093
2094/**
2095 * ftrace_rec_iter_start, start up iterating over traced functions
2096 *
2097 * Returns an iterator handle that is used to iterate over all
2098 * the records that represent address locations where functions
2099 * are traced.
2100 *
2101 * May return NULL if no records are available.
2102 */
2103struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2104{
2105 /*
2106 * We only use a single iterator.
2107 * Protected by the ftrace_lock mutex.
2108 */
2109 static struct ftrace_rec_iter ftrace_rec_iter;
2110 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2111
2112 iter->pg = ftrace_pages_start;
2113 iter->index = 0;
2114
2115 /* Could have empty pages */
2116 while (iter->pg && !iter->pg->index)
2117 iter->pg = iter->pg->next;
2118
2119 if (!iter->pg)
2120 return NULL;
2121
2122 return iter;
2123}
2124
2125/**
2126 * ftrace_rec_iter_next, get the next record to process.
2127 * @iter: The handle to the iterator.
2128 *
2129 * Returns the next iterator after the given iterator @iter.
2130 */
2131struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2132{
2133 iter->index++;
2134
2135 if (iter->index >= iter->pg->index) {
2136 iter->pg = iter->pg->next;
2137 iter->index = 0;
2138
2139 /* Could have empty pages */
2140 while (iter->pg && !iter->pg->index)
2141 iter->pg = iter->pg->next;
2142 }
2143
2144 if (!iter->pg)
2145 return NULL;
2146
2147 return iter;
2148}
2149
2150/**
2151 * ftrace_rec_iter_record, get the record at the iterator location
2152 * @iter: The current iterator location
2153 *
2154 * Returns the record that the current @iter is at.
2155 */
2156struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2157{
2158 return &iter->pg->records[iter->index];
2159}
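/*
 * Example (illustrative sketch, not from the original file): the loop an
 * arch runs over this iterator when it patches every site itself,
 * mirroring arch ftrace_replace_code() implementations.
 * example_patch_site() is hypothetical.
 */
#if 0
static void example_replace_code(void)
{
	struct ftrace_rec_iter *iter;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter))
		example_patch_site(ftrace_rec_iter_record(iter));
}
#endif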
2160
492a7ea5 2161static int
31e88909 2162ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
2163{
2164 unsigned long ip;
593eb8a2 2165 int ret;
3c1720f0
SR
2166
2167 ip = rec->ip;
2168
45a4a237
SR
2169 if (unlikely(ftrace_disabled))
2170 return 0;
2171
25aac9dc 2172 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 2173 if (ret) {
31e88909 2174 ftrace_bug(ret, ip);
492a7ea5 2175 return 0;
37ad5084 2176 }
492a7ea5 2177 return 1;
3c1720f0
SR
2178}
2179
000ab691
SR
2180/*
2181 * archs can override this function if they must do something
2182 * before the modifying code is performed.
2183 */
2184int __weak ftrace_arch_code_modify_prepare(void)
2185{
2186 return 0;
2187}
2188
2189/*
2190 * archs can override this function if they must do something
2191 * after the modifying code is performed.
2192 */
2193int __weak ftrace_arch_code_modify_post_process(void)
2194{
2195 return 0;
2196}
2197
8ed3e2cf 2198void ftrace_modify_all_code(int command)
3d083395 2199{
59338f75 2200 int update = command & FTRACE_UPDATE_TRACE_FUNC;
cd21067f 2201 int err = 0;
59338f75
SRRH
2202
2203 /*
 2204 * If the ftrace_caller calls an ftrace_ops func directly,
 2205 * we need to make sure that it only traces functions it
 2206 * expects to trace. When doing the switch of functions,
 2207 * we need to update to the ftrace_ops_list_func first
 2208 * before the transition between old and new calls is made,
 2209 * as the ftrace_ops_list_func will check the ops hashes
 2210 * to make sure the ops have the right functions
2211 * traced.
2212 */
cd21067f
PM
2213 if (update) {
2214 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2215 if (FTRACE_WARN_ON(err))
2216 return;
2217 }
59338f75 2218
8ed3e2cf 2219 if (command & FTRACE_UPDATE_CALLS)
d61f82d0 2220 ftrace_replace_code(1);
8ed3e2cf 2221 else if (command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
2222 ftrace_replace_code(0);
2223
405e1d83
SRRH
2224 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2225 function_trace_op = set_function_trace_op;
2226 smp_wmb();
2227 /* If irqs are disabled, we are in stop machine */
2228 if (!irqs_disabled())
2229 smp_call_function(ftrace_sync_ipi, NULL, 1);
cd21067f
PM
2230 err = ftrace_update_ftrace_func(ftrace_trace_function);
2231 if (FTRACE_WARN_ON(err))
2232 return;
405e1d83 2233 }
d61f82d0 2234
8ed3e2cf 2235 if (command & FTRACE_START_FUNC_RET)
cd21067f 2236 err = ftrace_enable_ftrace_graph_caller();
8ed3e2cf 2237 else if (command & FTRACE_STOP_FUNC_RET)
cd21067f
PM
2238 err = ftrace_disable_ftrace_graph_caller();
2239 FTRACE_WARN_ON(err);
8ed3e2cf
SR
2240}
2241
2242static int __ftrace_modify_code(void *data)
2243{
2244 int *command = data;
2245
2246 ftrace_modify_all_code(*command);
5a45cfe1 2247
d61f82d0 2248 return 0;
3d083395
SR
2249}
2250
c88fd863
SR
2251/**
2252 * ftrace_run_stop_machine, go back to the stop machine method
2253 * @command: The command to tell ftrace what to do
2254 *
 2255 * If an arch needs to fall back to the stop machine method, then
2256 * it can call this function.
2257 */
2258void ftrace_run_stop_machine(int command)
2259{
2260 stop_machine(__ftrace_modify_code, &command, NULL);
2261}
2262
2263/**
2264 * arch_ftrace_update_code, modify the code to trace or not trace
2265 * @command: The command that needs to be done
2266 *
 2267 * Archs can override this function if they do not need to
2268 * run stop_machine() to modify code.
2269 */
2270void __weak arch_ftrace_update_code(int command)
2271{
2272 ftrace_run_stop_machine(command);
2273}
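/*
 * Example (illustrative sketch, not from the original file): an arch that
 * can patch text while other CPUs run (x86 uses breakpoint-based patching)
 * overrides the weak function above.  A minimal override, assuming the
 * arch supplies its own synchronization, would be:
 */
#if 0
void arch_ftrace_update_code(int command)
{
	/* arch-specific preparation/synchronization goes here */
	ftrace_modify_all_code(command);
}
#endif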
2274
e309b41d 2275static void ftrace_run_update_code(int command)
3d083395 2276{
000ab691
SR
2277 int ret;
2278
2279 ret = ftrace_arch_code_modify_prepare();
2280 FTRACE_WARN_ON(ret);
2281 if (ret)
2282 return;
2283
c88fd863
SR
2284 /*
2285 * By default we use stop_machine() to modify the code.
 2286 * But archs can do whatever they want as long as it
2287 * is safe. The stop_machine() is the safest, but also
2288 * produces the most overhead.
2289 */
2290 arch_ftrace_update_code(command);
2291
000ab691
SR
2292 ret = ftrace_arch_code_modify_post_process();
2293 FTRACE_WARN_ON(ret);
3d083395
SR
2294}
2295
e1effa01
SRRH
2296static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
2297{
2298 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2299 ftrace_run_update_code(command);
2300 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2301}
2302
d61f82d0 2303static ftrace_func_t saved_ftrace_func;
60a7ecf4 2304static int ftrace_start_up;
df4fc315 2305
db0fbadc
JS
2306static void control_ops_free(struct ftrace_ops *ops)
2307{
2308 free_percpu(ops->disabled);
2309}
2310
df4fc315
SR
2311static void ftrace_startup_enable(int command)
2312{
2313 if (saved_ftrace_func != ftrace_trace_function) {
2314 saved_ftrace_func = ftrace_trace_function;
2315 command |= FTRACE_UPDATE_TRACE_FUNC;
2316 }
2317
2318 if (!command || !ftrace_enabled)
2319 return;
2320
2321 ftrace_run_update_code(command);
2322}
d61f82d0 2323
e1effa01
SRRH
2324static void ftrace_startup_all(int command)
2325{
2326 update_all_ops = true;
2327 ftrace_startup_enable(command);
2328 update_all_ops = false;
2329}
2330
a1cd6173 2331static int ftrace_startup(struct ftrace_ops *ops, int command)
3d083395 2332{
8a56d776 2333 int ret;
b848914c 2334
4eebcc81 2335 if (unlikely(ftrace_disabled))
a1cd6173 2336 return -ENODEV;
4eebcc81 2337
8a56d776
SRRH
2338 ret = __register_ftrace_function(ops);
2339 if (ret)
2340 return ret;
2341
60a7ecf4 2342 ftrace_start_up++;
30fb6aa7 2343 command |= FTRACE_UPDATE_CALLS;
d61f82d0 2344
e1effa01
SRRH
2345 /*
 2346 * Note that ftrace probes use this to start up
2347 * and modify functions it will probe. But we still
2348 * set the ADDING flag for modification, as probes
2349 * do not have trampolines. If they add them in the
2350 * future, then the probes will need to distinguish
2351 * between adding and updating probes.
2352 */
2353 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
66209a5b
SRRH
2354
2355 ftrace_hash_rec_enable(ops, 1);
ed926f9b 2356
df4fc315 2357 ftrace_startup_enable(command);
a1cd6173 2358
e1effa01
SRRH
2359 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2360
a1cd6173 2361 return 0;
3d083395
SR
2362}
2363
8a56d776 2364static int ftrace_shutdown(struct ftrace_ops *ops, int command)
3d083395 2365{
8a56d776 2366 int ret;
b848914c 2367
4eebcc81 2368 if (unlikely(ftrace_disabled))
8a56d776
SRRH
2369 return -ENODEV;
2370
2371 ret = __unregister_ftrace_function(ops);
2372 if (ret)
2373 return ret;
4eebcc81 2374
60a7ecf4 2375 ftrace_start_up--;
9ea1a153
FW
2376 /*
 2377 * Just warn in case of imbalance; no need to kill ftrace, it's not
 2378 * critical, but the ftrace_call callers may never be nopped again after
2379 * further ftrace uses.
2380 */
2381 WARN_ON_ONCE(ftrace_start_up < 0);
2382
66209a5b 2383 ftrace_hash_rec_disable(ops, 1);
ed926f9b 2384
a737e6dd 2385 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
b848914c 2386
30fb6aa7 2387 command |= FTRACE_UPDATE_CALLS;
3d083395 2388
d61f82d0
SR
2389 if (saved_ftrace_func != ftrace_trace_function) {
2390 saved_ftrace_func = ftrace_trace_function;
2391 command |= FTRACE_UPDATE_TRACE_FUNC;
2392 }
3d083395 2393
a4c35ed2
SRRH
2394 if (!command || !ftrace_enabled) {
2395 /*
2396 * If these are control ops, they still need their
 2397 * per_cpu field freed. Since function tracing is
2398 * not currently active, we can just free them
2399 * without synchronizing all CPUs.
2400 */
2401 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2402 control_ops_free(ops);
8a56d776 2403 return 0;
a4c35ed2 2404 }
d61f82d0 2405
79922b80
SRRH
2406 /*
2407 * If the ops uses a trampoline, then it needs to be
2408 * tested first on update.
2409 */
e1effa01 2410 ops->flags |= FTRACE_OPS_FL_REMOVING;
79922b80
SRRH
2411 removed_ops = ops;
2412
fef5aeee
SRRH
2413 /* The trampoline logic checks the old hashes */
2414 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2415 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2416
d61f82d0 2417 ftrace_run_update_code(command);
a4c35ed2 2418
84bde62c
SRRH
2419 /*
2420 * If there's no more ops registered with ftrace, run a
2421 * sanity check to make sure all rec flags are cleared.
2422 */
2423 if (ftrace_ops_list == &ftrace_list_end) {
2424 struct ftrace_page *pg;
2425 struct dyn_ftrace *rec;
2426
2427 do_for_each_ftrace_rec(pg, rec) {
2428 if (FTRACE_WARN_ON_ONCE(rec->flags))
2429 pr_warn(" %pS flags:%lx\n",
2430 (void *)rec->ip, rec->flags);
2431 } while_for_each_ftrace_rec();
2432 }
2433
fef5aeee
SRRH
2434 ops->old_hash.filter_hash = NULL;
2435 ops->old_hash.notrace_hash = NULL;
2436
2437 removed_ops = NULL;
e1effa01 2438 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
79922b80 2439
a4c35ed2
SRRH
2440 /*
 2441 * Dynamic ops may be freed; we must make sure that all
2442 * callers are done before leaving this function.
2443 * The same goes for freeing the per_cpu data of the control
2444 * ops.
2445 *
2446 * Again, normal synchronize_sched() is not good enough.
2447 * We need to do a hard force of sched synchronization.
2448 * This is because we use preempt_disable() to do RCU, but
2449 * the function tracers can be called where RCU is not watching
 2450 * (like before user_exit()). We cannot rely on the RCU
2451 * infrastructure to do the synchronization, thus we must do it
2452 * ourselves.
2453 */
2454 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2455 schedule_on_each_cpu(ftrace_sync);
2456
2457 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2458 control_ops_free(ops);
2459 }
2460
8a56d776 2461 return 0;
3d083395
SR
2462}
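/*
 * Example (illustrative sketch, not from the original file):
 * ftrace_startup() and ftrace_shutdown() are reached through the public
 * register/unregister API.  A minimal external user looks like this; the
 * my_* names are hypothetical.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called at the entry of every traced function */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);	/* -> ftrace_startup() */
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);		/* -> ftrace_shutdown() */
}
#endif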
2463
e309b41d 2464static void ftrace_startup_sysctl(void)
b0fc494f 2465{
4eebcc81
SR
2466 if (unlikely(ftrace_disabled))
2467 return;
2468
d61f82d0
SR
2469 /* Force update next time */
2470 saved_ftrace_func = NULL;
60a7ecf4
SR
2471 /* ftrace_start_up is true if we want ftrace running */
2472 if (ftrace_start_up)
30fb6aa7 2473 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
b0fc494f
SR
2474}
2475
e309b41d 2476static void ftrace_shutdown_sysctl(void)
b0fc494f 2477{
4eebcc81
SR
2478 if (unlikely(ftrace_disabled))
2479 return;
2480
60a7ecf4
SR
2481 /* ftrace_start_up is true if ftrace is running */
2482 if (ftrace_start_up)
79e406d7 2483 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
b0fc494f
SR
2484}
2485
3d083395 2486static cycle_t ftrace_update_time;
3d083395
SR
2487unsigned long ftrace_update_tot_cnt;
2488
8c4f3c3f 2489static inline int ops_traces_mod(struct ftrace_ops *ops)
f7bc8b61 2490{
8c4f3c3f
SRRH
2491 /*
 2492 * An empty filter_hash means we default to tracing the module.
2493 * But notrace hash requires a test of individual module functions.
2494 */
33b7f99c
SRRH
2495 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2496 ftrace_hash_empty(ops->func_hash->notrace_hash);
8c4f3c3f
SRRH
2497}
2498
2499/*
2500 * Check if the current ops references the record.
2501 *
2502 * If the ops traces all functions, then it was already accounted for.
2503 * If the ops does not trace the current record function, skip it.
2504 * If the ops ignores the function via notrace filter, skip it.
2505 */
2506static inline bool
2507ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2508{
2509 /* If ops isn't enabled, ignore it */
2510 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2511 return 0;
2512
2513 /* If ops traces all mods, we already accounted for it */
2514 if (ops_traces_mod(ops))
2515 return 0;
2516
2517 /* The function must be in the filter */
33b7f99c
SRRH
2518 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2519 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
8c4f3c3f 2520 return 0;
f7bc8b61 2521
8c4f3c3f 2522 /* If in notrace hash, we ignore it too */
33b7f99c 2523 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
8c4f3c3f
SRRH
2524 return 0;
2525
2526 return 1;
2527}
2528
2529static int referenced_filters(struct dyn_ftrace *rec)
2530{
2531 struct ftrace_ops *ops;
2532 int cnt = 0;
2533
2534 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2535 if (ops_references_rec(ops, rec))
2536 cnt++;
2537 }
2538
2539 return cnt;
f7bc8b61
SR
2540}
2541
1dc43cf0 2542static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3d083395 2543{
85ae32ae 2544 struct ftrace_page *pg;
e94142a6 2545 struct dyn_ftrace *p;
f22f9a89 2546 cycle_t start, stop;
1dc43cf0 2547 unsigned long update_cnt = 0;
f7bc8b61 2548 unsigned long ref = 0;
8c4f3c3f 2549 bool test = false;
85ae32ae 2550 int i;
f7bc8b61
SR
2551
2552 /*
2553 * When adding a module, we need to check if tracers are
2554 * currently enabled and if they are set to trace all functions.
2555 * If they are, we need to enable the module functions as well
2556 * as update the reference counts for those function records.
2557 */
2558 if (mod) {
2559 struct ftrace_ops *ops;
2560
2561 for (ops = ftrace_ops_list;
2562 ops != &ftrace_list_end; ops = ops->next) {
8c4f3c3f
SRRH
2563 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2564 if (ops_traces_mod(ops))
2565 ref++;
2566 else
2567 test = true;
2568 }
f7bc8b61
SR
2569 }
2570 }
3d083395 2571
750ed1a4 2572 start = ftrace_now(raw_smp_processor_id());
3d083395 2573
1dc43cf0 2574 for (pg = new_pgs; pg; pg = pg->next) {
3d083395 2575
85ae32ae 2576 for (i = 0; i < pg->index; i++) {
8c4f3c3f
SRRH
2577 int cnt = ref;
2578
85ae32ae
SR
2579 /* If something went wrong, bail without enabling anything */
2580 if (unlikely(ftrace_disabled))
2581 return -1;
f22f9a89 2582
85ae32ae 2583 p = &pg->records[i];
8c4f3c3f
SRRH
2584 if (test)
2585 cnt += referenced_filters(p);
2586 p->flags = cnt;
f22f9a89 2587
85ae32ae
SR
2588 /*
2589 * Do the initial record conversion from mcount jump
2590 * to the NOP instructions.
2591 */
2592 if (!ftrace_code_disable(mod, p))
2593 break;
5cb084bb 2594
1dc43cf0 2595 update_cnt++;
5cb084bb 2596
85ae32ae
SR
2597 /*
2598 * If the tracing is enabled, go ahead and enable the record.
2599 *
 2600 * The reason not to enable the record immediately is the
 2601 * inherent check of ftrace_make_nop/ftrace_make_call for
 2602 * correct previous instructions. Doing the NOP
 2603 * conversion first puts the module into the correct state, thus
2604 * passing the ftrace_make_call check.
2605 */
8c4f3c3f 2606 if (ftrace_start_up && cnt) {
85ae32ae
SR
2607 int failed = __ftrace_replace_code(p, 1);
2608 if (failed)
2609 ftrace_bug(failed, p->ip);
2610 }
5cb084bb 2611 }
3d083395
SR
2612 }
2613
750ed1a4 2614 stop = ftrace_now(raw_smp_processor_id());
3d083395 2615 ftrace_update_time = stop - start;
1dc43cf0 2616 ftrace_update_tot_cnt += update_cnt;
3d083395 2617
16444a8a
ACM
2618 return 0;
2619}
2620
a7900875 2621static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3c1720f0 2622{
a7900875 2623 int order;
3c1720f0 2624 int cnt;
3c1720f0 2625
a7900875
SR
2626 if (WARN_ON(!count))
2627 return -EINVAL;
2628
2629 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3c1720f0
SR
2630
2631 /*
a7900875
SR
2632 * We want to fill as much as possible. No more than a page
2633 * may be empty.
3c1720f0 2634 */
a7900875
SR
2635 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2636 order--;
3c1720f0 2637
a7900875
SR
2638 again:
2639 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3c1720f0 2640
a7900875
SR
2641 if (!pg->records) {
2642 /* if we can't allocate this size, try something smaller */
2643 if (!order)
2644 return -ENOMEM;
2645 order >>= 1;
2646 goto again;
2647 }
3c1720f0 2648
a7900875
SR
2649 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2650 pg->size = cnt;
3c1720f0 2651
a7900875
SR
2652 if (cnt > count)
2653 cnt = count;
2654
2655 return cnt;
2656}
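/*
 * A worked example (entry size is arch-dependent, so the numbers are
 * illustrative): with 4K pages and a 16-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE is 256.  For count = 1000, DIV_ROUND_UP(1000, 256) = 4,
 * so order = 2 (four pages, 1024 slots).  The while loop leaves order at 2,
 * since 1024 is less than count + ENTRIES_PER_PAGE = 1256, and cnt is
 * finally capped back down to 1000.
 */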
2657
2658static struct ftrace_page *
2659ftrace_allocate_pages(unsigned long num_to_init)
2660{
2661 struct ftrace_page *start_pg;
2662 struct ftrace_page *pg;
2663 int order;
2664 int cnt;
2665
2666 if (!num_to_init)
 2667 return NULL;
2668
2669 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2670 if (!pg)
2671 return NULL;
2672
2673 /*
 2674 * Try to allocate as much as possible in one contiguous
2675 * location that fills in all of the space. We want to
2676 * waste as little space as possible.
2677 */
2678 for (;;) {
2679 cnt = ftrace_allocate_records(pg, num_to_init);
2680 if (cnt < 0)
2681 goto free_pages;
2682
2683 num_to_init -= cnt;
2684 if (!num_to_init)
3c1720f0
SR
2685 break;
2686
a7900875
SR
2687 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2688 if (!pg->next)
2689 goto free_pages;
2690
3c1720f0
SR
2691 pg = pg->next;
2692 }
2693
a7900875
SR
2694 return start_pg;
2695
2696 free_pages:
1f61be00
NK
2697 pg = start_pg;
2698 while (pg) {
a7900875
SR
2699 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2700 free_pages((unsigned long)pg->records, order);
2701 start_pg = pg->next;
2702 kfree(pg);
2703 pg = start_pg;
2704 }
2705 pr_info("ftrace: FAILED to allocate memory for functions\n");
2706 return NULL;
2707}
2708
5072c59f
SR
2709#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2710
2711struct ftrace_iterator {
98c4fd04 2712 loff_t pos;
4aeb6967
SR
2713 loff_t func_pos;
2714 struct ftrace_page *pg;
2715 struct dyn_ftrace *func;
2716 struct ftrace_func_probe *probe;
2717 struct trace_parser parser;
1cf41dd7 2718 struct ftrace_hash *hash;
33dc9b12 2719 struct ftrace_ops *ops;
4aeb6967
SR
2720 int hidx;
2721 int idx;
2722 unsigned flags;
5072c59f
SR
2723};
2724
8fc0c701 2725static void *
4aeb6967 2726t_hash_next(struct seq_file *m, loff_t *pos)
8fc0c701
SR
2727{
2728 struct ftrace_iterator *iter = m->private;
4aeb6967 2729 struct hlist_node *hnd = NULL;
8fc0c701
SR
2730 struct hlist_head *hhd;
2731
8fc0c701 2732 (*pos)++;
98c4fd04 2733 iter->pos = *pos;
8fc0c701 2734
4aeb6967
SR
2735 if (iter->probe)
2736 hnd = &iter->probe->node;
8fc0c701
SR
2737 retry:
2738 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2739 return NULL;
2740
2741 hhd = &ftrace_func_hash[iter->hidx];
2742
2743 if (hlist_empty(hhd)) {
2744 iter->hidx++;
2745 hnd = NULL;
2746 goto retry;
2747 }
2748
2749 if (!hnd)
2750 hnd = hhd->first;
2751 else {
2752 hnd = hnd->next;
2753 if (!hnd) {
2754 iter->hidx++;
2755 goto retry;
2756 }
2757 }
2758
4aeb6967
SR
2759 if (WARN_ON_ONCE(!hnd))
2760 return NULL;
2761
2762 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2763
2764 return iter;
8fc0c701
SR
2765}
2766
2767static void *t_hash_start(struct seq_file *m, loff_t *pos)
2768{
2769 struct ftrace_iterator *iter = m->private;
2770 void *p = NULL;
d82d6244
LZ
2771 loff_t l;
2772
69a3083c
SR
2773 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2774 return NULL;
2775
2bccfffd
SR
2776 if (iter->func_pos > *pos)
2777 return NULL;
8fc0c701 2778
d82d6244 2779 iter->hidx = 0;
2bccfffd 2780 for (l = 0; l <= (*pos - iter->func_pos); ) {
4aeb6967 2781 p = t_hash_next(m, &l);
d82d6244
LZ
2782 if (!p)
2783 break;
2784 }
4aeb6967
SR
2785 if (!p)
2786 return NULL;
2787
98c4fd04
SR
2788 /* Only set this if we have an item */
2789 iter->flags |= FTRACE_ITER_HASH;
2790
4aeb6967 2791 return iter;
8fc0c701
SR
2792}
2793
4aeb6967
SR
2794static int
2795t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
8fc0c701 2796{
b6887d79 2797 struct ftrace_func_probe *rec;
8fc0c701 2798
4aeb6967
SR
2799 rec = iter->probe;
2800 if (WARN_ON_ONCE(!rec))
2801 return -EIO;
8fc0c701 2802
809dcf29
SR
2803 if (rec->ops->print)
2804 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2805
b375a11a 2806 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
8fc0c701
SR
2807
2808 if (rec->data)
2809 seq_printf(m, ":%p", rec->data);
2810 seq_putc(m, '\n');
2811
2812 return 0;
2813}
2814
e309b41d 2815static void *
5072c59f
SR
2816t_next(struct seq_file *m, void *v, loff_t *pos)
2817{
2818 struct ftrace_iterator *iter = m->private;
fc13cb0c 2819 struct ftrace_ops *ops = iter->ops;
5072c59f
SR
2820 struct dyn_ftrace *rec = NULL;
2821
45a4a237
SR
2822 if (unlikely(ftrace_disabled))
2823 return NULL;
2824
8fc0c701 2825 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 2826 return t_hash_next(m, pos);
8fc0c701 2827
5072c59f 2828 (*pos)++;
1106b699 2829 iter->pos = iter->func_pos = *pos;
5072c59f 2830
0c75a3ed 2831 if (iter->flags & FTRACE_ITER_PRINTALL)
57c072c7 2832 return t_hash_start(m, pos);
0c75a3ed 2833
5072c59f
SR
2834 retry:
2835 if (iter->idx >= iter->pg->index) {
2836 if (iter->pg->next) {
2837 iter->pg = iter->pg->next;
2838 iter->idx = 0;
2839 goto retry;
2840 }
2841 } else {
2842 rec = &iter->pg->records[iter->idx++];
32082309 2843 if (((iter->flags & FTRACE_ITER_FILTER) &&
33b7f99c 2844 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
0183fb1c 2845
41c52c0d 2846 ((iter->flags & FTRACE_ITER_NOTRACE) &&
33b7f99c 2847 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
647bcd03
SR
2848
2849 ((iter->flags & FTRACE_ITER_ENABLED) &&
23ea9c4d 2850 !(rec->flags & FTRACE_FL_ENABLED))) {
647bcd03 2851
5072c59f
SR
2852 rec = NULL;
2853 goto retry;
2854 }
2855 }
2856
4aeb6967 2857 if (!rec)
57c072c7 2858 return t_hash_start(m, pos);
4aeb6967
SR
2859
2860 iter->func = rec;
2861
2862 return iter;
5072c59f
SR
2863}
2864
98c4fd04
SR
2865static void reset_iter_read(struct ftrace_iterator *iter)
2866{
2867 iter->pos = 0;
2868 iter->func_pos = 0;
70f77b3f 2869 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
5072c59f
SR
2870}
2871
2872static void *t_start(struct seq_file *m, loff_t *pos)
2873{
2874 struct ftrace_iterator *iter = m->private;
fc13cb0c 2875 struct ftrace_ops *ops = iter->ops;
5072c59f 2876 void *p = NULL;
694ce0a5 2877 loff_t l;
5072c59f 2878
8fc0c701 2879 mutex_lock(&ftrace_lock);
45a4a237
SR
2880
2881 if (unlikely(ftrace_disabled))
2882 return NULL;
2883
98c4fd04
SR
2884 /*
2885 * If an lseek was done, then reset and start from beginning.
2886 */
2887 if (*pos < iter->pos)
2888 reset_iter_read(iter);
2889
0c75a3ed
SR
2890 /*
2891 * For set_ftrace_filter reading, if we have the filter
2892 * off, we can short cut and just print out that all
2893 * functions are enabled.
2894 */
8c006cf7 2895 if ((iter->flags & FTRACE_ITER_FILTER &&
33b7f99c 2896 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
8c006cf7 2897 (iter->flags & FTRACE_ITER_NOTRACE &&
33b7f99c 2898 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
0c75a3ed 2899 if (*pos > 0)
8fc0c701 2900 return t_hash_start(m, pos);
0c75a3ed 2901 iter->flags |= FTRACE_ITER_PRINTALL;
df091625
CW
2902 /* reset in case of seek/pread */
2903 iter->flags &= ~FTRACE_ITER_HASH;
0c75a3ed
SR
2904 return iter;
2905 }
2906
8fc0c701
SR
2907 if (iter->flags & FTRACE_ITER_HASH)
2908 return t_hash_start(m, pos);
2909
98c4fd04
SR
2910 /*
2911 * Unfortunately, we need to restart at ftrace_pages_start
 2912 * every time we let go of the ftrace_lock. This is because
2913 * those pointers can change without the lock.
2914 */
694ce0a5
LZ
2915 iter->pg = ftrace_pages_start;
2916 iter->idx = 0;
2917 for (l = 0; l <= *pos; ) {
2918 p = t_next(m, p, &l);
2919 if (!p)
2920 break;
50cdaf08 2921 }
5821e1b7 2922
69a3083c
SR
2923 if (!p)
2924 return t_hash_start(m, pos);
4aeb6967
SR
2925
2926 return iter;
5072c59f
SR
2927}
2928
2929static void t_stop(struct seq_file *m, void *p)
2930{
8fc0c701 2931 mutex_unlock(&ftrace_lock);
5072c59f
SR
2932}
2933
2934static int t_show(struct seq_file *m, void *v)
2935{
0c75a3ed 2936 struct ftrace_iterator *iter = m->private;
4aeb6967 2937 struct dyn_ftrace *rec;
5072c59f 2938
8fc0c701 2939 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 2940 return t_hash_show(m, iter);
8fc0c701 2941
0c75a3ed 2942 if (iter->flags & FTRACE_ITER_PRINTALL) {
8c006cf7
NK
2943 if (iter->flags & FTRACE_ITER_NOTRACE)
2944 seq_printf(m, "#### no functions disabled ####\n");
2945 else
2946 seq_printf(m, "#### all functions enabled ####\n");
0c75a3ed
SR
2947 return 0;
2948 }
2949
4aeb6967
SR
2950 rec = iter->func;
2951
5072c59f
SR
2952 if (!rec)
2953 return 0;
2954
647bcd03 2955 seq_printf(m, "%ps", (void *)rec->ip);
9674b2fa 2956 if (iter->flags & FTRACE_ITER_ENABLED) {
08f6fba5 2957 seq_printf(m, " (%ld)%s",
0376bde1 2958 ftrace_rec_count(rec),
9674b2fa
SRRH
2959 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2960 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2961 struct ftrace_ops *ops;
2962
5fecaa04 2963 ops = ftrace_find_tramp_ops_any(rec);
fef5aeee 2964 if (ops)
9674b2fa
SRRH
2965 seq_printf(m, "\ttramp: %pS",
2966 (void *)ops->trampoline);
2967 else
2968 seq_printf(m, "\ttramp: ERROR!");
2969 }
2970 }
2971
647bcd03 2972 seq_printf(m, "\n");
5072c59f
SR
2973
2974 return 0;
2975}
2976
88e9d34c 2977static const struct seq_operations show_ftrace_seq_ops = {
5072c59f
SR
2978 .start = t_start,
2979 .next = t_next,
2980 .stop = t_stop,
2981 .show = t_show,
2982};
2983
e309b41d 2984static int
5072c59f
SR
2985ftrace_avail_open(struct inode *inode, struct file *file)
2986{
2987 struct ftrace_iterator *iter;
5072c59f 2988
4eebcc81
SR
2989 if (unlikely(ftrace_disabled))
2990 return -ENODEV;
2991
50e18b94
JO
2992 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2993 if (iter) {
2994 iter->pg = ftrace_pages_start;
2995 iter->ops = &global_ops;
4bf39a94 2996 }
5072c59f 2997
50e18b94 2998 return iter ? 0 : -ENOMEM;
5072c59f
SR
2999}
3000
647bcd03
SR
3001static int
3002ftrace_enabled_open(struct inode *inode, struct file *file)
3003{
3004 struct ftrace_iterator *iter;
647bcd03
SR
3005
3006 if (unlikely(ftrace_disabled))
3007 return -ENODEV;
3008
50e18b94
JO
3009 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3010 if (iter) {
3011 iter->pg = ftrace_pages_start;
3012 iter->flags = FTRACE_ITER_ENABLED;
3013 iter->ops = &global_ops;
647bcd03
SR
3014 }
3015
50e18b94 3016 return iter ? 0 : -ENOMEM;
647bcd03
SR
3017}
3018
fc13cb0c
SR
3019/**
3020 * ftrace_regex_open - initialize function tracer filter files
3021 * @ops: The ftrace_ops that hold the hash filters
3022 * @flag: The type of filter to process
3023 * @inode: The inode, usually passed in to your open routine
3024 * @file: The file, usually passed in to your open routine
3025 *
3026 * ftrace_regex_open() initializes the filter files for the
3027 * @ops. Depending on @flag it may process the filter hash or
3028 * the notrace hash of @ops. With this called from the open
3029 * routine, you can use ftrace_filter_write() for the write
3030 * routine if @flag has FTRACE_ITER_FILTER set, or
3031 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
098c879e 3032 * tracing_lseek() should be used as the lseek routine, and
fc13cb0c
SR
3033 * release must call ftrace_regex_release().
3034 */
3035int
f45948e8 3036ftrace_regex_open(struct ftrace_ops *ops, int flag,
1cf41dd7 3037 struct inode *inode, struct file *file)
5072c59f
SR
3038{
3039 struct ftrace_iterator *iter;
f45948e8 3040 struct ftrace_hash *hash;
5072c59f
SR
3041 int ret = 0;
3042
f04f24fb
MH
3043 ftrace_ops_init(ops);
3044
4eebcc81
SR
3045 if (unlikely(ftrace_disabled))
3046 return -ENODEV;
3047
5072c59f
SR
3048 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3049 if (!iter)
3050 return -ENOMEM;
3051
689fd8b6 3052 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3053 kfree(iter);
3054 return -ENOMEM;
3055 }
3056
3f2367ba
MH
3057 iter->ops = ops;
3058 iter->flags = flag;
3059
33b7f99c 3060 mutex_lock(&ops->func_hash->regex_lock);
3f2367ba 3061
f45948e8 3062 if (flag & FTRACE_ITER_NOTRACE)
33b7f99c 3063 hash = ops->func_hash->notrace_hash;
f45948e8 3064 else
33b7f99c 3065 hash = ops->func_hash->filter_hash;
f45948e8 3066
33dc9b12 3067 if (file->f_mode & FMODE_WRITE) {
ef2fbe16
NK
3068 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3069
3070 if (file->f_flags & O_TRUNC)
3071 iter->hash = alloc_ftrace_hash(size_bits);
3072 else
3073 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3074
33dc9b12
SR
3075 if (!iter->hash) {
3076 trace_parser_put(&iter->parser);
3077 kfree(iter);
3f2367ba
MH
3078 ret = -ENOMEM;
3079 goto out_unlock;
33dc9b12
SR
3080 }
3081 }
1cf41dd7 3082
5072c59f
SR
3083 if (file->f_mode & FMODE_READ) {
3084 iter->pg = ftrace_pages_start;
5072c59f
SR
3085
3086 ret = seq_open(file, &show_ftrace_seq_ops);
3087 if (!ret) {
3088 struct seq_file *m = file->private_data;
3089 m->private = iter;
79fe249c 3090 } else {
33dc9b12
SR
3091 /* Failed */
3092 free_ftrace_hash(iter->hash);
79fe249c 3093 trace_parser_put(&iter->parser);
5072c59f 3094 kfree(iter);
79fe249c 3095 }
5072c59f
SR
3096 } else
3097 file->private_data = iter;
3f2367ba
MH
3098
3099 out_unlock:
33b7f99c 3100 mutex_unlock(&ops->func_hash->regex_lock);
5072c59f
SR
3101
3102 return ret;
3103}
3104
41c52c0d
SR
3105static int
3106ftrace_filter_open(struct inode *inode, struct file *file)
3107{
e3b3e2e8
SRRH
3108 struct ftrace_ops *ops = inode->i_private;
3109
3110 return ftrace_regex_open(ops,
69a3083c
SR
3111 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3112 inode, file);
41c52c0d
SR
3113}
3114
3115static int
3116ftrace_notrace_open(struct inode *inode, struct file *file)
3117{
e3b3e2e8
SRRH
3118 struct ftrace_ops *ops = inode->i_private;
3119
3120 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
1cf41dd7 3121 inode, file);
41c52c0d
SR
3122}
3123
64e7c440 3124static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 3125{
9f4801e3 3126 int matched = 0;
751e9983 3127 int slen;
9f4801e3 3128
9f4801e3
SR
3129 switch (type) {
3130 case MATCH_FULL:
3131 if (strcmp(str, regex) == 0)
3132 matched = 1;
3133 break;
3134 case MATCH_FRONT_ONLY:
3135 if (strncmp(str, regex, len) == 0)
3136 matched = 1;
3137 break;
3138 case MATCH_MIDDLE_ONLY:
3139 if (strstr(str, regex))
3140 matched = 1;
3141 break;
3142 case MATCH_END_ONLY:
751e9983
LZ
3143 slen = strlen(str);
3144 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
9f4801e3
SR
3145 matched = 1;
3146 break;
3147 }
3148
3149 return matched;
3150}
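/*
 * The (regex, len, type) triple is produced by filter_parse_regex() from a
 * user glob; the glob forms map to the cases above as:
 *	"func"    -> MATCH_FULL
 *	"func*"   -> MATCH_FRONT_ONLY
 *	"*func*"  -> MATCH_MIDDLE_ONLY
 *	"*func"   -> MATCH_END_ONLY
 */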
3151
b448c4e3 3152static int
1cf41dd7 3153enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
996e87be 3154{
b448c4e3 3155 struct ftrace_func_entry *entry;
b448c4e3
SR
3156 int ret = 0;
3157
1cf41dd7
SR
3158 entry = ftrace_lookup_ip(hash, rec->ip);
3159 if (not) {
3160 /* Do nothing if it doesn't exist */
3161 if (!entry)
3162 return 0;
b448c4e3 3163
33dc9b12 3164 free_hash_entry(hash, entry);
1cf41dd7
SR
3165 } else {
3166 /* Do nothing if it exists */
3167 if (entry)
3168 return 0;
b448c4e3 3169
1cf41dd7 3170 ret = add_hash_entry(hash, rec->ip);
b448c4e3
SR
3171 }
3172 return ret;
996e87be
SR
3173}
3174
64e7c440 3175static int
b9df92d2
SR
3176ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3177 char *regex, int len, int type)
64e7c440
SR
3178{
3179 char str[KSYM_SYMBOL_LEN];
b9df92d2
SR
3180 char *modname;
3181
3182 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3183
3184 if (mod) {
3185 /* module lookup requires matching the module */
3186 if (!modname || strcmp(modname, mod))
3187 return 0;
3188
3189 /* blank search means to match all funcs in the mod */
3190 if (!len)
3191 return 1;
3192 }
64e7c440 3193
64e7c440
SR
3194 return ftrace_match(str, regex, len, type);
3195}
3196
1cf41dd7
SR
3197static int
3198match_records(struct ftrace_hash *hash, char *buff,
3199 int len, char *mod, int not)
9f4801e3 3200{
b9df92d2 3201 unsigned search_len = 0;
9f4801e3
SR
3202 struct ftrace_page *pg;
3203 struct dyn_ftrace *rec;
b9df92d2
SR
3204 int type = MATCH_FULL;
3205 char *search = buff;
311d16da 3206 int found = 0;
b448c4e3 3207 int ret;
9f4801e3 3208
b9df92d2
SR
3209 if (len) {
3210 type = filter_parse_regex(buff, len, &search, &not);
3211 search_len = strlen(search);
3212 }
9f4801e3 3213
52baf119 3214 mutex_lock(&ftrace_lock);
265c831c 3215
b9df92d2
SR
3216 if (unlikely(ftrace_disabled))
3217 goto out_unlock;
9f4801e3 3218
265c831c 3219 do_for_each_ftrace_rec(pg, rec) {
b9df92d2 3220 if (ftrace_match_record(rec, mod, search, search_len, type)) {
1cf41dd7 3221 ret = enter_record(hash, rec, not);
b448c4e3
SR
3222 if (ret < 0) {
3223 found = ret;
3224 goto out_unlock;
3225 }
311d16da 3226 found = 1;
265c831c
SR
3227 }
3228 } while_for_each_ftrace_rec();
b9df92d2 3229 out_unlock:
52baf119 3230 mutex_unlock(&ftrace_lock);
311d16da
LZ
3231
3232 return found;
5072c59f
SR
3233}
3234
64e7c440 3235static int
1cf41dd7 3236ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
64e7c440 3237{
1cf41dd7 3238 return match_records(hash, buff, len, NULL, 0);
64e7c440
SR
3239}
3240
1cf41dd7
SR
3241static int
3242ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
64e7c440 3243{
64e7c440 3244 int not = 0;
6a24a244 3245
64e7c440
SR
3246 /* blank or '*' mean the same */
3247 if (strcmp(buff, "*") == 0)
3248 buff[0] = 0;
3249
 3250 /* handle the case of "don't filter this module" */
3251 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3252 buff[0] = 0;
3253 not = 1;
3254 }
3255
1cf41dd7 3256 return match_records(hash, buff, strlen(buff), mod, not);
64e7c440
SR
3257}
3258
f6180773
SR
3259/*
3260 * We register the module command as a template to show others how
 3261 * to register a command as well.
3262 */
3263
3264static int
43dd61c9
SR
3265ftrace_mod_callback(struct ftrace_hash *hash,
3266 char *func, char *cmd, char *param, int enable)
f6180773
SR
3267{
3268 char *mod;
b448c4e3 3269 int ret = -EINVAL;
f6180773
SR
3270
3271 /*
3272 * cmd == 'mod' because we only registered this func
3273 * for the 'mod' ftrace_func_command.
3274 * But if you register one func with multiple commands,
3275 * you can tell which command was used by the cmd
3276 * parameter.
3277 */
3278
3279 /* we must have a module name */
3280 if (!param)
b448c4e3 3281 return ret;
f6180773
SR
3282
3283 mod = strsep(&param, ":");
3284 if (!strlen(mod))
b448c4e3 3285 return ret;
f6180773 3286
1cf41dd7 3287 ret = ftrace_match_module_records(hash, func, mod);
b448c4e3
SR
3288 if (!ret)
3289 ret = -EINVAL;
3290 if (ret < 0)
3291 return ret;
3292
3293 return 0;
f6180773
SR
3294}
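/*
 * From userspace this command is reached with, e.g.,
 *	echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 * which arrives here as func = "*", cmd = "mod", param = "ext4".
 */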
3295
3296static struct ftrace_func_command ftrace_mod_cmd = {
3297 .name = "mod",
3298 .func = ftrace_mod_callback,
3299};
3300
3301static int __init ftrace_mod_cmd_init(void)
3302{
3303 return register_ftrace_command(&ftrace_mod_cmd);
3304}
6f415672 3305core_initcall(ftrace_mod_cmd_init);
f6180773 3306
2f5f6ad9 3307static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 3308 struct ftrace_ops *op, struct pt_regs *pt_regs)
59df055f 3309{
b6887d79 3310 struct ftrace_func_probe *entry;
59df055f 3311 struct hlist_head *hhd;
59df055f 3312 unsigned long key;
59df055f
SR
3313
3314 key = hash_long(ip, FTRACE_HASH_BITS);
3315
3316 hhd = &ftrace_func_hash[key];
3317
3318 if (hlist_empty(hhd))
3319 return;
3320
3321 /*
 3322 * Disable preemption for these calls to prevent an RCU grace
3323 * period. This syncs the hash iteration and freeing of items
3324 * on the hash. rcu_read_lock is too dangerous here.
3325 */
5168ae50 3326 preempt_disable_notrace();
1bb539ca 3327 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
59df055f
SR
3328 if (entry->ip == ip)
3329 entry->ops->func(ip, parent_ip, &entry->data);
3330 }
5168ae50 3331 preempt_enable_notrace();
59df055f
SR
3332}
3333
b6887d79 3334static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 3335{
fb9fb015 3336 .func = function_trace_probe_call,
f04f24fb 3337 .flags = FTRACE_OPS_FL_INITIALIZED,
33b7f99c 3338 INIT_OPS_HASH(trace_probe_ops)
59df055f
SR
3339};
3340
b6887d79 3341static int ftrace_probe_registered;
59df055f 3342
b6887d79 3343static void __enable_ftrace_function_probe(void)
59df055f 3344{
b848914c 3345 int ret;
59df055f
SR
3346 int i;
3347
19dd603e
SRRH
3348 if (ftrace_probe_registered) {
3349 /* still need to update the function call sites */
3350 if (ftrace_enabled)
e1effa01 3351 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
59df055f 3352 return;
19dd603e 3353 }
59df055f
SR
3354
3355 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3356 struct hlist_head *hhd = &ftrace_func_hash[i];
3357 if (hhd->first)
3358 break;
3359 }
3360 /* Nothing registered? */
3361 if (i == FTRACE_FUNC_HASHSIZE)
3362 return;
3363
8a56d776 3364 ret = ftrace_startup(&trace_probe_ops, 0);
b848914c 3365
b6887d79 3366 ftrace_probe_registered = 1;
59df055f
SR
3367}
3368
b6887d79 3369static void __disable_ftrace_function_probe(void)
59df055f
SR
3370{
3371 int i;
3372
b6887d79 3373 if (!ftrace_probe_registered)
59df055f
SR
3374 return;
3375
3376 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3377 struct hlist_head *hhd = &ftrace_func_hash[i];
3378 if (hhd->first)
3379 return;
3380 }
3381
3382 /* no more funcs left */
8a56d776 3383 ftrace_shutdown(&trace_probe_ops, 0);
b848914c 3384
b6887d79 3385 ftrace_probe_registered = 0;
59df055f
SR
3386}
3387
3388
7818b388 3389static void ftrace_free_entry(struct ftrace_func_probe *entry)
59df055f 3390{
59df055f 3391 if (entry->ops->free)
e67efb93 3392 entry->ops->free(entry->ops, entry->ip, &entry->data);
59df055f
SR
3393 kfree(entry);
3394}
3395
59df055f 3396int
b6887d79 3397register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3398 void *data)
3399{
b6887d79 3400 struct ftrace_func_probe *entry;
33b7f99c 3401 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3296fc4e 3402 struct ftrace_hash *old_hash = *orig_hash;
e1df4cb6 3403 struct ftrace_hash *hash;
59df055f
SR
3404 struct ftrace_page *pg;
3405 struct dyn_ftrace *rec;
59df055f 3406 int type, len, not;
6a24a244 3407 unsigned long key;
59df055f
SR
3408 int count = 0;
3409 char *search;
e1df4cb6 3410 int ret;
59df055f 3411
3f6fe06d 3412 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
3413 len = strlen(search);
3414
b6887d79 3415 /* we do not support '!' for function probes */
59df055f
SR
3416 if (WARN_ON(not))
3417 return -EINVAL;
3418
33b7f99c 3419 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
59df055f 3420
3296fc4e 3421 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
e1df4cb6
SRRH
3422 if (!hash) {
3423 count = -ENOMEM;
5ae0bf59 3424 goto out;
e1df4cb6
SRRH
3425 }
3426
3427 if (unlikely(ftrace_disabled)) {
3428 count = -ENODEV;
5ae0bf59 3429 goto out;
e1df4cb6 3430 }
59df055f 3431
5ae0bf59
SRRH
3432 mutex_lock(&ftrace_lock);
3433
45a4a237 3434 do_for_each_ftrace_rec(pg, rec) {
59df055f 3435
b9df92d2 3436 if (!ftrace_match_record(rec, NULL, search, len, type))
59df055f
SR
3437 continue;
3438
3439 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3440 if (!entry) {
b6887d79 3441 /* If we did not process any, then return error */
59df055f
SR
3442 if (!count)
3443 count = -ENOMEM;
3444 goto out_unlock;
3445 }
3446
3447 count++;
3448
3449 entry->data = data;
3450
3451 /*
3452 * The caller might want to do something special
3453 * for each function we find. We call the callback
3454 * to give the caller an opportunity to do so.
3455 */
e67efb93
SRRH
3456 if (ops->init) {
3457 if (ops->init(ops, rec->ip, &entry->data) < 0) {
59df055f
SR
3458 /* caller does not like this func */
3459 kfree(entry);
3460 continue;
3461 }
3462 }
3463
e1df4cb6
SRRH
3464 ret = enter_record(hash, rec, 0);
3465 if (ret < 0) {
3466 kfree(entry);
3467 count = ret;
3468 goto out_unlock;
3469 }
3470
59df055f
SR
3471 entry->ops = ops;
3472 entry->ip = rec->ip;
3473
3474 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3475 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3476
3477 } while_for_each_ftrace_rec();
e1df4cb6
SRRH
3478
3479 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3296fc4e
SRRH
3480 if (!ret)
3481 free_ftrace_hash_rcu(old_hash);
3482 else
e1df4cb6
SRRH
3483 count = ret;
3484
b6887d79 3485 __enable_ftrace_function_probe();
59df055f
SR
3486
3487 out_unlock:
5ae0bf59
SRRH
3488 mutex_unlock(&ftrace_lock);
3489 out:
33b7f99c 3490 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
e1df4cb6 3491 free_ftrace_hash(hash);
59df055f
SR
3492
3493 return count;
3494}
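/*
 * Example (illustrative sketch, not from the original file): how a caller
 * such as trace_functions.c attaches a probe to a glob of functions.  The
 * my_probe_* names are hypothetical.
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs each time a matched function is entered */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int my_attach(void)
{
	/* returns the number of functions matched, or a negative error */
	return register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
}
#endif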
3495
3496enum {
b6887d79
SR
3497 PROBE_TEST_FUNC = 1,
3498 PROBE_TEST_DATA = 2
59df055f
SR
3499};
3500
3501static void
b6887d79 3502__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3503 void *data, int flags)
3504{
e1df4cb6 3505 struct ftrace_func_entry *rec_entry;
b6887d79 3506 struct ftrace_func_probe *entry;
7818b388 3507 struct ftrace_func_probe *p;
33b7f99c 3508 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3296fc4e 3509 struct ftrace_hash *old_hash = *orig_hash;
7818b388 3510 struct list_head free_list;
e1df4cb6 3511 struct ftrace_hash *hash;
b67bfe0d 3512 struct hlist_node *tmp;
59df055f
SR
3513 char str[KSYM_SYMBOL_LEN];
3514 int type = MATCH_FULL;
3515 int i, len = 0;
3516 char *search;
3296fc4e 3517 int ret;
59df055f 3518
b36461da 3519 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
59df055f 3520 glob = NULL;
b36461da 3521 else if (glob) {
59df055f
SR
3522 int not;
3523
3f6fe06d 3524 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
3525 len = strlen(search);
3526
b6887d79 3527 /* we do not support '!' for function probes */
59df055f
SR
3528 if (WARN_ON(not))
3529 return;
3530 }
3531
33b7f99c 3532 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
e1df4cb6
SRRH
3533
3534 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3535 if (!hash)
3536 /* Hmm, should report this somehow */
3537 goto out_unlock;
3538
7818b388
SRRH
3539 INIT_LIST_HEAD(&free_list);
3540
59df055f
SR
3541 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3542 struct hlist_head *hhd = &ftrace_func_hash[i];
3543
b67bfe0d 3544 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
59df055f
SR
3545
3546 /* break up if statements for readability */
b6887d79 3547 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
3548 continue;
3549
b6887d79 3550 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
3551 continue;
3552
3553 /* do this last, since it is the most expensive */
3554 if (glob) {
3555 kallsyms_lookup(entry->ip, NULL, NULL,
3556 NULL, str);
3557 if (!ftrace_match(str, glob, len, type))
3558 continue;
3559 }
3560
e1df4cb6
SRRH
3561 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3562 /* It is possible more than one entry had this ip */
3563 if (rec_entry)
3564 free_hash_entry(hash, rec_entry);
3565
740466bc 3566 hlist_del_rcu(&entry->node);
7818b388 3567 list_add(&entry->free_list, &free_list);
59df055f
SR
3568 }
3569 }
3f2367ba 3570 mutex_lock(&ftrace_lock);
b6887d79 3571 __disable_ftrace_function_probe();
e1df4cb6
SRRH
3572 /*
3573 * Remove after the disable is called. Otherwise, if the last
3574 * probe is removed, a null hash means *all enabled*.
3575 */
3296fc4e 3576 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
7818b388 3577 synchronize_sched();
3296fc4e
SRRH
3578 if (!ret)
3579 free_ftrace_hash_rcu(old_hash);
3580
7818b388
SRRH
3581 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3582 list_del(&entry->free_list);
3583 ftrace_free_entry(entry);
3584 }
3f2367ba 3585 mutex_unlock(&ftrace_lock);
7818b388 3586
e1df4cb6 3587 out_unlock:
33b7f99c 3588 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
e1df4cb6 3589 free_ftrace_hash(hash);
59df055f
SR
3590}
3591
3592void
b6887d79 3593unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3594 void *data)
3595{
b6887d79
SR
3596 __unregister_ftrace_function_probe(glob, ops, data,
3597 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
3598}
3599
3600void
b6887d79 3601unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 3602{
b6887d79 3603 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
3604}
3605
b6887d79 3606void unregister_ftrace_function_probe_all(char *glob)
59df055f 3607{
b6887d79 3608 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
3609}
3610
f6180773
SR
3611static LIST_HEAD(ftrace_commands);
3612static DEFINE_MUTEX(ftrace_cmd_mutex);
3613
38de93ab
TZ
3614/*
3615 * Currently we only register ftrace commands from __init, so mark this
3616 * __init too.
3617 */
3618__init int register_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
3619{
3620 struct ftrace_func_command *p;
3621 int ret = 0;
3622
3623 mutex_lock(&ftrace_cmd_mutex);
3624 list_for_each_entry(p, &ftrace_commands, list) {
3625 if (strcmp(cmd->name, p->name) == 0) {
3626 ret = -EBUSY;
3627 goto out_unlock;
3628 }
3629 }
3630 list_add(&cmd->list, &ftrace_commands);
3631 out_unlock:
3632 mutex_unlock(&ftrace_cmd_mutex);
3633
3634 return ret;
3635}
3636
38de93ab
TZ
3637/*
3638 * Currently we only unregister ftrace commands from __init, so mark
3639 * this __init too.
3640 */
3641__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
3642{
3643 struct ftrace_func_command *p, *n;
3644 int ret = -ENODEV;
3645
3646 mutex_lock(&ftrace_cmd_mutex);
3647 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3648 if (strcmp(cmd->name, p->name) == 0) {
3649 ret = 0;
3650 list_del_init(&p->list);
3651 goto out_unlock;
3652 }
3653 }
3654 out_unlock:
3655 mutex_unlock(&ftrace_cmd_mutex);
3656
3657 return ret;
3658}
3659
33dc9b12
SR
3660static int ftrace_process_regex(struct ftrace_hash *hash,
3661 char *buff, int len, int enable)
64e7c440 3662{
f6180773 3663 char *func, *command, *next = buff;
6a24a244 3664 struct ftrace_func_command *p;
0aff1c0c 3665 int ret = -EINVAL;
64e7c440
SR
3666
3667 func = strsep(&next, ":");
3668
3669 if (!next) {
1cf41dd7 3670 ret = ftrace_match_records(hash, func, len);
b448c4e3
SR
3671 if (!ret)
3672 ret = -EINVAL;
3673 if (ret < 0)
3674 return ret;
3675 return 0;
64e7c440
SR
3676 }
3677
f6180773 3678 /* command found */
64e7c440
SR
3679
3680 command = strsep(&next, ":");
3681
f6180773
SR
3682 mutex_lock(&ftrace_cmd_mutex);
3683 list_for_each_entry(p, &ftrace_commands, list) {
3684 if (strcmp(p->name, command) == 0) {
43dd61c9 3685 ret = p->func(hash, func, command, next, enable);
f6180773
SR
3686 goto out_unlock;
3687 }
64e7c440 3688 }
f6180773
SR
3689 out_unlock:
3690 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 3691
f6180773 3692 return ret;
64e7c440
SR
3693}
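/*
 * The strings parsed here have the form "func[:cmd[:param]]": "schedule"
 * is a plain filter match, while e.g. "wakeup*:traceoff" looks up the
 * registered "traceoff" ftrace_func_command and hands it next = NULL.
 */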
3694
e309b41d 3695static ssize_t
41c52c0d
SR
3696ftrace_regex_write(struct file *file, const char __user *ubuf,
3697 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
3698{
3699 struct ftrace_iterator *iter;
689fd8b6 3700 struct trace_parser *parser;
3701 ssize_t ret, read;
5072c59f 3702
4ba7978e 3703 if (!cnt)
5072c59f
SR
3704 return 0;
3705
5072c59f
SR
3706 if (file->f_mode & FMODE_READ) {
3707 struct seq_file *m = file->private_data;
3708 iter = m->private;
3709 } else
3710 iter = file->private_data;
3711
f04f24fb 3712 if (unlikely(ftrace_disabled))
3f2367ba
MH
3713 return -ENODEV;
3714
3715 /* iter->hash is a local copy, so we don't need regex_lock */
f04f24fb 3716
689fd8b6 3717 parser = &iter->parser;
3718 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 3719
4ba7978e 3720 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 3721 !trace_parser_cont(parser)) {
33dc9b12 3722 ret = ftrace_process_regex(iter->hash, parser->buffer,
689fd8b6 3723 parser->idx, enable);
313254a9 3724 trace_parser_clear(parser);
7c088b51 3725 if (ret < 0)
3f2367ba 3726 goto out;
eda1e328 3727 }
5072c59f 3728
5072c59f 3729 ret = read;
3f2367ba 3730 out:
5072c59f
SR
3731 return ret;
3732}
3733
fc13cb0c 3734ssize_t
41c52c0d
SR
3735ftrace_filter_write(struct file *file, const char __user *ubuf,
3736 size_t cnt, loff_t *ppos)
3737{
3738 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3739}
3740
fc13cb0c 3741ssize_t
41c52c0d
SR
3742ftrace_notrace_write(struct file *file, const char __user *ubuf,
3743 size_t cnt, loff_t *ppos)
3744{
3745 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3746}
3747
33dc9b12 3748static int
647664ea
MH
3749ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3750{
3751 struct ftrace_func_entry *entry;
3752
3753 if (!ftrace_location(ip))
3754 return -EINVAL;
3755
3756 if (remove) {
3757 entry = ftrace_lookup_ip(hash, ip);
3758 if (!entry)
3759 return -ENOENT;
3760 free_hash_entry(hash, entry);
3761 return 0;
3762 }
3763
3764 return add_hash_entry(hash, ip);
3765}
3766
1c80c432
SRRH
3767static void ftrace_ops_update_code(struct ftrace_ops *ops)
3768{
3769 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
e1effa01 3770 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
1c80c432
SRRH
3771}
3772
647664ea
MH
3773static int
3774ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3775 unsigned long ip, int remove, int reset, int enable)
41c52c0d 3776{
33dc9b12 3777 struct ftrace_hash **orig_hash;
3296fc4e 3778 struct ftrace_hash *old_hash;
f45948e8 3779 struct ftrace_hash *hash;
33dc9b12 3780 int ret;
f45948e8 3781
41c52c0d 3782 if (unlikely(ftrace_disabled))
33dc9b12 3783 return -ENODEV;
41c52c0d 3784
33b7f99c 3785 mutex_lock(&ops->func_hash->regex_lock);
3f2367ba 3786
f45948e8 3787 if (enable)
33b7f99c 3788 orig_hash = &ops->func_hash->filter_hash;
f45948e8 3789 else
33b7f99c 3790 orig_hash = &ops->func_hash->notrace_hash;
33dc9b12 3791
b972cc58
WN
3792 if (reset)
3793 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
3794 else
3795 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3796
3f2367ba
MH
3797 if (!hash) {
3798 ret = -ENOMEM;
3799 goto out_regex_unlock;
3800 }
f45948e8 3801
ac483c44
JO
3802 if (buf && !ftrace_match_records(hash, buf, len)) {
3803 ret = -EINVAL;
3804 goto out_regex_unlock;
3805 }
647664ea
MH
3806 if (ip) {
3807 ret = ftrace_match_addr(hash, ip, remove);
3808 if (ret < 0)
3809 goto out_regex_unlock;
3810 }
33dc9b12
SR
3811
3812 mutex_lock(&ftrace_lock);
3296fc4e 3813 old_hash = *orig_hash;
41fb61c2 3814 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3296fc4e 3815 if (!ret) {
1c80c432 3816 ftrace_ops_update_code(ops);
3296fc4e
SRRH
3817 free_ftrace_hash_rcu(old_hash);
3818 }
33dc9b12
SR
3819 mutex_unlock(&ftrace_lock);
3820
ac483c44 3821 out_regex_unlock:
33b7f99c 3822 mutex_unlock(&ops->func_hash->regex_lock);
33dc9b12
SR
3823
3824 free_ftrace_hash(hash);
3825 return ret;
41c52c0d
SR
3826}
3827
647664ea
MH
3828static int
3829ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3830 int reset, int enable)
3831{
3832 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3833}
3834
3835/**
3836 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3837 * @ops - the ops to set the filter with
3838 * @ip - the address to add to or remove from the filter.
3839 * @remove - non zero to remove the ip from the filter
3840 * @reset - non zero to reset all filters before applying this filter.
3841 *
3842 * Filters denote which functions should be enabled when tracing is enabled.
3843 * If @ip is zero, the filter is not updated.
3844 */
3845int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3846 int remove, int reset)
3847{
f04f24fb 3848 ftrace_ops_init(ops);
647664ea
MH
3849 return ftrace_set_addr(ops, ip, remove, reset, 1);
3850}
3851EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3852
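/*
 * Minimal usage sketch, not part of this file: filter on a single
 * address resolved via kallsyms (needs <linux/kallsyms.h>).  "my_ops"
 * is a hypothetical module-local ops with a valid ->func.
 */
static int __init trace_one_ip(struct ftrace_ops *my_ops)
{
	unsigned long ip = kallsyms_lookup_name("schedule");
	int ret;

	/* remove = 0, reset = 1: trace only this one address */
	ret = ftrace_set_filter_ip(my_ops, ip, 0, 1);
	return ret ? ret : register_ftrace_function(my_ops);
}
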
3853static int
3854ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3855 int reset, int enable)
3856{
3857 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3858}
3859
77a2b37d
SR
3860/**
3861 * ftrace_set_filter - set a function to filter on in ftrace
936e074b
SR
3862 * @ops - the ops to set the filter with
3863 * @buf - the string that holds the function filter text.
3864 * @len - the length of the string.
3865 * @reset - non zero to reset all filters before applying this filter.
3866 *
3867 * Filters denote which functions should be enabled when tracing is enabled.
3868 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3869 */
ac483c44 3870int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
3871 int len, int reset)
3872{
f04f24fb 3873 ftrace_ops_init(ops);
ac483c44 3874 return ftrace_set_regex(ops, buf, len, reset, 1);
936e074b
SR
3875}
3876EXPORT_SYMBOL_GPL(ftrace_set_filter);
3877
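/*
 * Usage sketch (same hypothetical "my_ops" as above): reset any old
 * filter and trace every function matching a glob.
 */
static int __init filter_vfs(struct ftrace_ops *my_ops)
{
	unsigned char pat[] = "vfs_*";

	return ftrace_set_filter(my_ops, pat, sizeof(pat) - 1, 1);
}
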
3878/**
3879 * ftrace_set_notrace - set a function to not trace in ftrace
3880 * @ops - the ops to set the notrace filter with
3881 * @buf - the string that holds the function notrace text.
3882 * @len - the length of the string.
3883 * @reset - non zero to reset all filters before applying this filter.
3884 *
3885 * Notrace Filters denote which functions should not be enabled when tracing
3886 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3887 * for tracing.
3888 */
ac483c44 3889int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
3890 int len, int reset)
3891{
f04f24fb 3892 ftrace_ops_init(ops);
ac483c44 3893 return ftrace_set_regex(ops, buf, len, reset, 0);
936e074b
SR
3894}
3895EXPORT_SYMBOL_GPL(ftrace_set_notrace);
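
/*
 * Companion sketch: carve a noisy subset back out of the trace set.
 * reset = 0 keeps any notrace rules installed earlier.
 */
static int __init skip_locks(struct ftrace_ops *my_ops)
{
	unsigned char pat[] = "*spin_lock*";

	return ftrace_set_notrace(my_ops, pat, sizeof(pat) - 1, 0);
}
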
3896/**
8d1b065d 3897 * ftrace_set_global_filter - set a function to filter on with global tracers
77a2b37d
SR
3898 * @buf - the string that holds the function filter text.
3899 * @len - the length of the string.
3900 * @reset - non zero to reset all filters before applying this filter.
3901 *
3902 * Filters denote which functions should be enabled when tracing is enabled.
3903 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3904 */
936e074b 3905void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
77a2b37d 3906{
f45948e8 3907 ftrace_set_regex(&global_ops, buf, len, reset, 1);
41c52c0d 3908}
936e074b 3909EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4eebcc81 3910
41c52c0d 3911/**
8d1b065d 3912 * ftrace_set_global_notrace - set a function to not trace with global tracers
41c52c0d
SR
3913 * @buf - the string that holds the function notrace text.
3914 * @len - the length of the string.
3915 * @reset - non zero to reset all filters before applying this filter.
3916 *
3917 * Notrace Filters denote which functions should not be enabled when tracing
3918 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3919 * for tracing.
3920 */
936e074b 3921void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
41c52c0d 3922{
f45948e8 3923 ftrace_set_regex(&global_ops, buf, len, reset, 0);
77a2b37d 3924}
936e074b 3925EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
77a2b37d 3926
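/*
 * The global variants operate on global_ops; a sketch of equivalent
 * calls (casts elided for brevity):
 *
 *	ftrace_set_global_filter("kmalloc", strlen("kmalloc"), 1);
 *	ftrace_set_global_notrace("kfree", strlen("kfree"), 0);
 */
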
2af15d6a
SR
3927/*
3928 * command line interface to allow users to set filters on boot up.
3929 */
3930#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3931static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3932static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3933
f1ed7c74
SRRH
3934/* Used by the function selftest to skip the test when a boot-time filter is set */
3935bool ftrace_filter_param __initdata;
3936
2af15d6a
SR
3937static int __init set_ftrace_notrace(char *str)
3938{
f1ed7c74 3939 ftrace_filter_param = true;
75761cc1 3940 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
3941 return 1;
3942}
3943__setup("ftrace_notrace=", set_ftrace_notrace);
3944
3945static int __init set_ftrace_filter(char *str)
3946{
f1ed7c74 3947 ftrace_filter_param = true;
75761cc1 3948 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
3949 return 1;
3950}
3951__setup("ftrace_filter=", set_ftrace_filter);
3952
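/*
 * Boot-line example exercising the two hooks above (illustrative;
 * "ftrace=function" selects the tracer itself):
 *
 *	ftrace=function ftrace_filter=kmalloc,kfree ftrace_notrace=*rcu*
 */
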
369bc18f 3953#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 3954static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
0d7d9a16 3955static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
faf982a6 3956static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
801c29fd 3957
369bc18f
SA
3958static int __init set_graph_function(char *str)
3959{
06f43d66 3960 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
3961 return 1;
3962}
3963__setup("ftrace_graph_filter=", set_graph_function);
3964
0d7d9a16
NK
3965static int __init set_graph_notrace_function(char *str)
3966{
3967 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
3968 return 1;
3969}
3970__setup("ftrace_graph_notrace=", set_graph_notrace_function);
3971
3972static void __init set_ftrace_early_graph(char *buf, int enable)
369bc18f
SA
3973{
3974 int ret;
3975 char *func;
0d7d9a16
NK
3976 unsigned long *table = ftrace_graph_funcs;
3977 int *count = &ftrace_graph_count;
3978
3979 if (!enable) {
3980 table = ftrace_graph_notrace_funcs;
3981 count = &ftrace_graph_notrace_count;
3982 }
369bc18f
SA
3983
3984 while (buf) {
3985 func = strsep(&buf, ",");
3986 /* we allow only one expression at a time */
0d7d9a16 3987 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
369bc18f
SA
3988 if (ret)
3989 printk(KERN_DEBUG "ftrace: function %s not traceable\n",
3990 func);
3991 }
3992}
3993#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3994
2a85a37f
SR
3995void __init
3996ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2af15d6a
SR
3997{
3998 char *func;
3999
f04f24fb
MH
4000 ftrace_ops_init(ops);
4001
2af15d6a
SR
4002 while (buf) {
4003 func = strsep(&buf, ",");
f45948e8 4004 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2af15d6a
SR
4005 }
4006}
4007
4008static void __init set_ftrace_early_filters(void)
4009{
4010 if (ftrace_filter_buf[0])
2a85a37f 4011 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
2af15d6a 4012 if (ftrace_notrace_buf[0])
2a85a37f 4013 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
369bc18f
SA
4014#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4015 if (ftrace_graph_buf[0])
0d7d9a16
NK
4016 set_ftrace_early_graph(ftrace_graph_buf, 1);
4017 if (ftrace_graph_notrace_buf[0])
4018 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
369bc18f 4019#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
4020}
4021
fc13cb0c 4022int ftrace_regex_release(struct inode *inode, struct file *file)
5072c59f
SR
4023{
4024 struct seq_file *m = (struct seq_file *)file->private_data;
4025 struct ftrace_iterator *iter;
33dc9b12 4026 struct ftrace_hash **orig_hash;
3296fc4e 4027 struct ftrace_hash *old_hash;
689fd8b6 4028 struct trace_parser *parser;
ed926f9b 4029 int filter_hash;
33dc9b12 4030 int ret;
5072c59f 4031
5072c59f
SR
4032 if (file->f_mode & FMODE_READ) {
4033 iter = m->private;
5072c59f
SR
4034 seq_release(inode, file);
4035 } else
4036 iter = file->private_data;
4037
689fd8b6 4038 parser = &iter->parser;
4039 if (trace_parser_loaded(parser)) {
4040 parser->buffer[parser->idx] = 0;
1cf41dd7 4041 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5072c59f
SR
4042 }
4043
689fd8b6 4044 trace_parser_put(parser);
689fd8b6 4045
33b7f99c 4046 mutex_lock(&iter->ops->func_hash->regex_lock);
3f2367ba 4047
058e297d 4048 if (file->f_mode & FMODE_WRITE) {
ed926f9b
SR
4049 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4050
4051 if (filter_hash)
33b7f99c 4052 orig_hash = &iter->ops->func_hash->filter_hash;
ed926f9b 4053 else
33b7f99c 4054 orig_hash = &iter->ops->func_hash->notrace_hash;
33dc9b12 4055
058e297d 4056 mutex_lock(&ftrace_lock);
3296fc4e 4057 old_hash = *orig_hash;
41fb61c2
SR
4058 ret = ftrace_hash_move(iter->ops, filter_hash,
4059 orig_hash, iter->hash);
3296fc4e 4060 if (!ret) {
1c80c432 4061 ftrace_ops_update_code(iter->ops);
3296fc4e
SRRH
4062 free_ftrace_hash_rcu(old_hash);
4063 }
058e297d
SR
4064 mutex_unlock(&ftrace_lock);
4065 }
3f2367ba 4066
33b7f99c 4067 mutex_unlock(&iter->ops->func_hash->regex_lock);
33dc9b12
SR
4068 free_ftrace_hash(iter->hash);
4069 kfree(iter);
058e297d 4070
5072c59f
SR
4071 return 0;
4072}
4073
5e2336a0 4074static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
4075 .open = ftrace_avail_open,
4076 .read = seq_read,
4077 .llseek = seq_lseek,
3be04b47 4078 .release = seq_release_private,
5072c59f
SR
4079};
4080
647bcd03
SR
4081static const struct file_operations ftrace_enabled_fops = {
4082 .open = ftrace_enabled_open,
4083 .read = seq_read,
4084 .llseek = seq_lseek,
4085 .release = seq_release_private,
4086};
4087
5e2336a0 4088static const struct file_operations ftrace_filter_fops = {
5072c59f 4089 .open = ftrace_filter_open,
850a80cf 4090 .read = seq_read,
5072c59f 4091 .write = ftrace_filter_write,
098c879e 4092 .llseek = tracing_lseek,
1cf41dd7 4093 .release = ftrace_regex_release,
5072c59f
SR
4094};
4095
5e2336a0 4096static const struct file_operations ftrace_notrace_fops = {
41c52c0d 4097 .open = ftrace_notrace_open,
850a80cf 4098 .read = seq_read,
41c52c0d 4099 .write = ftrace_notrace_write,
098c879e 4100 .llseek = tracing_lseek,
1cf41dd7 4101 .release = ftrace_regex_release,
41c52c0d
SR
4102};
4103
ea4e2bc4
SR
4104#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4105
4106static DEFINE_MUTEX(graph_lock);
4107
4108int ftrace_graph_count;
29ad23b0 4109int ftrace_graph_notrace_count;
ea4e2bc4 4110unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
29ad23b0 4111unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
ea4e2bc4 4112
faf982a6
NK
4113struct ftrace_graph_data {
4114 unsigned long *table;
4115 size_t size;
4116 int *count;
4117 const struct seq_operations *seq_ops;
4118};
4119
ea4e2bc4 4120static void *
85951842 4121__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 4122{
faf982a6
NK
4123 struct ftrace_graph_data *fgd = m->private;
4124
4125 if (*pos >= *fgd->count)
ea4e2bc4 4126 return NULL;
faf982a6 4127 return &fgd->table[*pos];
85951842 4128}
ea4e2bc4 4129
85951842
LZ
4130static void *
4131g_next(struct seq_file *m, void *v, loff_t *pos)
4132{
4133 (*pos)++;
4134 return __g_next(m, pos);
ea4e2bc4
SR
4135}
4136
4137static void *g_start(struct seq_file *m, loff_t *pos)
4138{
faf982a6
NK
4139 struct ftrace_graph_data *fgd = m->private;
4140
ea4e2bc4
SR
4141 mutex_lock(&graph_lock);
4142
f9349a8f 4143 /* Nothing in the table; tell g_show to report that all functions are enabled */
faf982a6 4144 if (!*fgd->count && !*pos)
f9349a8f
FW
4145 return (void *)1;
4146
85951842 4147 return __g_next(m, pos);
ea4e2bc4
SR
4148}
4149
4150static void g_stop(struct seq_file *m, void *p)
4151{
4152 mutex_unlock(&graph_lock);
4153}
4154
4155static int g_show(struct seq_file *m, void *v)
4156{
4157 unsigned long *ptr = v;
ea4e2bc4
SR
4158
4159 if (!ptr)
4160 return 0;
4161
f9349a8f 4162 if (ptr == (unsigned long *)1) {
280d1429
NK
4163 struct ftrace_graph_data *fgd = m->private;
4164
4165 if (fgd->table == ftrace_graph_funcs)
4166 seq_printf(m, "#### all functions enabled ####\n");
4167 else
4168 seq_printf(m, "#### no functions disabled ####\n");
f9349a8f
FW
4169 return 0;
4170 }
4171
b375a11a 4172 seq_printf(m, "%ps\n", (void *)*ptr);
ea4e2bc4
SR
4173
4174 return 0;
4175}
4176
88e9d34c 4177static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
4178 .start = g_start,
4179 .next = g_next,
4180 .stop = g_stop,
4181 .show = g_show,
4182};
4183
4184static int
faf982a6
NK
4185__ftrace_graph_open(struct inode *inode, struct file *file,
4186 struct ftrace_graph_data *fgd)
ea4e2bc4
SR
4187{
4188 int ret = 0;
4189
ea4e2bc4
SR
4190 mutex_lock(&graph_lock);
4191 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 4192 (file->f_flags & O_TRUNC)) {
faf982a6
NK
4193 *fgd->count = 0;
4194 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
ea4e2bc4 4195 }
a4ec5e0c 4196 mutex_unlock(&graph_lock);
ea4e2bc4 4197
faf982a6
NK
4198 if (file->f_mode & FMODE_READ) {
4199 ret = seq_open(file, fgd->seq_ops);
4200 if (!ret) {
4201 struct seq_file *m = file->private_data;
4202 m->private = fgd;
4203 }
4204 } else
4205 file->private_data = fgd;
ea4e2bc4
SR
4206
4207 return ret;
4208}
4209
faf982a6
NK
4210static int
4211ftrace_graph_open(struct inode *inode, struct file *file)
4212{
4213 struct ftrace_graph_data *fgd;
4214
4215 if (unlikely(ftrace_disabled))
4216 return -ENODEV;
4217
4218 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4219 if (fgd == NULL)
4220 return -ENOMEM;
4221
4222 fgd->table = ftrace_graph_funcs;
4223 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4224 fgd->count = &ftrace_graph_count;
4225 fgd->seq_ops = &ftrace_graph_seq_ops;
4226
4227 return __ftrace_graph_open(inode, file, fgd);
4228}
4229
29ad23b0
NK
4230static int
4231ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4232{
4233 struct ftrace_graph_data *fgd;
4234
4235 if (unlikely(ftrace_disabled))
4236 return -ENODEV;
4237
4238 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4239 if (fgd == NULL)
4240 return -ENOMEM;
4241
4242 fgd->table = ftrace_graph_notrace_funcs;
4243 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4244 fgd->count = &ftrace_graph_notrace_count;
4245 fgd->seq_ops = &ftrace_graph_seq_ops;
4246
4247 return __ftrace_graph_open(inode, file, fgd);
4248}
4249
87827111
LZ
4250static int
4251ftrace_graph_release(struct inode *inode, struct file *file)
4252{
faf982a6
NK
4253 if (file->f_mode & FMODE_READ) {
4254 struct seq_file *m = file->private_data;
4255
4256 kfree(m->private);
87827111 4257 seq_release(inode, file);
faf982a6
NK
4258 } else {
4259 kfree(file->private_data);
4260 }
4261
87827111
LZ
4262 return 0;
4263}
4264
ea4e2bc4 4265static int
faf982a6 4266ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
ea4e2bc4 4267{
ea4e2bc4
SR
4268 struct dyn_ftrace *rec;
4269 struct ftrace_page *pg;
f9349a8f 4270 int search_len;
c7c6b1fe 4271 int fail = 1;
f9349a8f
FW
4272 int type, not;
4273 char *search;
4274 bool exists;
4275 int i;
ea4e2bc4 4276
f9349a8f 4277 /* decode regex */
3f6fe06d 4278 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
faf982a6 4279 if (!not && *idx >= size)
c7c6b1fe 4280 return -EBUSY;
f9349a8f
FW
4281
4282 search_len = strlen(search);
4283
52baf119 4284 mutex_lock(&ftrace_lock);
45a4a237
SR
4285
4286 if (unlikely(ftrace_disabled)) {
4287 mutex_unlock(&ftrace_lock);
4288 return -ENODEV;
4289 }
4290
265c831c
SR
4291 do_for_each_ftrace_rec(pg, rec) {
4292
b9df92d2 4293 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
c7c6b1fe 4294 /* if it is in the array */
f9349a8f 4295 exists = false;
c7c6b1fe 4296 for (i = 0; i < *idx; i++) {
f9349a8f
FW
4297 if (array[i] == rec->ip) {
4298 exists = true;
265c831c
SR
4299 break;
4300 }
c7c6b1fe
LZ
4301 }
4302
4303 if (!not) {
4304 fail = 0;
4305 if (!exists) {
4306 array[(*idx)++] = rec->ip;
faf982a6 4307 if (*idx >= size)
c7c6b1fe
LZ
4308 goto out;
4309 }
4310 } else {
4311 if (exists) {
4312 array[i] = array[--(*idx)];
4313 array[*idx] = 0;
4314 fail = 0;
4315 }
4316 }
ea4e2bc4 4317 }
265c831c 4318 } while_for_each_ftrace_rec();
c7c6b1fe 4319out:
52baf119 4320 mutex_unlock(&ftrace_lock);
ea4e2bc4 4321
c7c6b1fe
LZ
4322 if (fail)
4323 return -EINVAL;
4324
c7c6b1fe 4325 return 0;
ea4e2bc4
SR
4326}
4327
4328static ssize_t
4329ftrace_graph_write(struct file *file, const char __user *ubuf,
4330 size_t cnt, loff_t *ppos)
4331{
689fd8b6 4332 struct trace_parser parser;
6a10108b 4333 ssize_t read, ret = 0;
faf982a6 4334 struct ftrace_graph_data *fgd = file->private_data;
ea4e2bc4 4335
c7c6b1fe 4336 if (!cnt)
ea4e2bc4
SR
4337 return 0;
4338
6a10108b
NK
4339 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4340 return -ENOMEM;
ea4e2bc4 4341
689fd8b6 4342 read = trace_get_user(&parser, ubuf, cnt, ppos);
ea4e2bc4 4343
4ba7978e 4344 if (read >= 0 && trace_parser_loaded((&parser))) {
689fd8b6 4345 parser.buffer[parser.idx] = 0;
4346
6a10108b
NK
4347 mutex_lock(&graph_lock);
4348
689fd8b6 4349 /* we allow only one expression at a time */
faf982a6
NK
4350 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4351 parser.buffer);
6a10108b
NK
4352
4353 mutex_unlock(&graph_lock);
ea4e2bc4 4354 }
ea4e2bc4 4355
6a10108b
NK
4356 if (!ret)
4357 ret = read;
1eb90f13 4358
689fd8b6 4359 trace_parser_put(&parser);
ea4e2bc4
SR
4360
4361 return ret;
4362}
4363
4364static const struct file_operations ftrace_graph_fops = {
87827111
LZ
4365 .open = ftrace_graph_open,
4366 .read = seq_read,
4367 .write = ftrace_graph_write,
098c879e 4368 .llseek = tracing_lseek,
87827111 4369 .release = ftrace_graph_release,
ea4e2bc4 4370};
29ad23b0
NK
4371
4372static const struct file_operations ftrace_graph_notrace_fops = {
4373 .open = ftrace_graph_notrace_open,
4374 .read = seq_read,
4375 .write = ftrace_graph_write,
098c879e 4376 .llseek = tracing_lseek,
29ad23b0
NK
4377 .release = ftrace_graph_release,
4378};
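
/*
 * tracefs view of the graph files backed by the fops above
 * (illustrative):
 *
 *	echo do_sys_open     > set_graph_function	# graph only this call tree
 *	echo kmem_cache_free > set_graph_notrace	# never graph this one
 */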
ea4e2bc4
SR
4379#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4380
591dffda
SRRH
4381void ftrace_create_filter_files(struct ftrace_ops *ops,
4382 struct dentry *parent)
4383{
4384
4385 trace_create_file("set_ftrace_filter", 0644, parent,
4386 ops, &ftrace_filter_fops);
4387
4388 trace_create_file("set_ftrace_notrace", 0644, parent,
4389 ops, &ftrace_notrace_fops);
4390}
4391
4392/*
4393 * The name "destroy_filter_files" is really a misnomer. Although
4394 * in the future it may actually delete the files, for now it is
4395 * really intended to make sure the ops passed in are disabled
4396 * and that, when this function returns, the caller is free to
4397 * free the ops.
4398 *
4399 * The "destroy" name is only to match the "create" name that this
4400 * should be paired with.
4401 */
4402void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4403{
4404 mutex_lock(&ftrace_lock);
4405 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4406 ftrace_shutdown(ops, 0);
4407 ops->flags |= FTRACE_OPS_FL_DELETED;
4408 mutex_unlock(&ftrace_lock);
4409}
4410
df4fc315 4411static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 4412{
5072c59f 4413
5452af66
FW
4414 trace_create_file("available_filter_functions", 0444,
4415 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 4416
647bcd03
SR
4417 trace_create_file("enabled_functions", 0444,
4418 d_tracer, NULL, &ftrace_enabled_fops);
4419
591dffda 4420 ftrace_create_filter_files(&global_ops, d_tracer);
ad90c0e3 4421
ea4e2bc4 4422#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5452af66 4423 trace_create_file("set_graph_function", 0444, d_tracer,
ea4e2bc4
SR
4424 NULL,
4425 &ftrace_graph_fops);
29ad23b0
NK
4426 trace_create_file("set_graph_notrace", 0444, d_tracer,
4427 NULL,
4428 &ftrace_graph_notrace_fops);
ea4e2bc4
SR
4429#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4430
5072c59f
SR
4431 return 0;
4432}
4433
9fd49328 4434static int ftrace_cmp_ips(const void *a, const void *b)
68950619 4435{
9fd49328
SR
4436 const unsigned long *ipa = a;
4437 const unsigned long *ipb = b;
68950619 4438
9fd49328
SR
4439 if (*ipa > *ipb)
4440 return 1;
4441 if (*ipa < *ipb)
4442 return -1;
4443 return 0;
4444}
4445
4446static void ftrace_swap_ips(void *a, void *b, int size)
4447{
4448 unsigned long *ipa = a;
4449 unsigned long *ipb = b;
4450 unsigned long t;
4451
4452 t = *ipa;
4453 *ipa = *ipb;
4454 *ipb = t;
68950619
SR
4455}
4456
5cb084bb 4457static int ftrace_process_locs(struct module *mod,
31e88909 4458 unsigned long *start,
68bf21aa
SR
4459 unsigned long *end)
4460{
706c81f8 4461 struct ftrace_page *start_pg;
a7900875 4462 struct ftrace_page *pg;
706c81f8 4463 struct dyn_ftrace *rec;
a7900875 4464 unsigned long count;
68bf21aa
SR
4465 unsigned long *p;
4466 unsigned long addr;
4376cac6 4467 unsigned long flags = 0; /* Shut up gcc */
a7900875
SR
4468 int ret = -ENOMEM;
4469
4470 count = end - start;
4471
4472 if (!count)
4473 return 0;
4474
9fd49328
SR
4475 sort(start, count, sizeof(*start),
4476 ftrace_cmp_ips, ftrace_swap_ips);
4477
706c81f8
SR
4478 start_pg = ftrace_allocate_pages(count);
4479 if (!start_pg)
a7900875 4480 return -ENOMEM;
68bf21aa 4481
e6ea44e9 4482 mutex_lock(&ftrace_lock);
a7900875 4483
32082309
SR
4484 /*
4485 * The core kernel and each module need their own pages, as
4486 * modules will free them when they are removed.
4487 * Force a new page to be allocated for modules.
4488 */
a7900875
SR
4489 if (!mod) {
4490 WARN_ON(ftrace_pages || ftrace_pages_start);
4491 /* First initialization */
706c81f8 4492 ftrace_pages = ftrace_pages_start = start_pg;
a7900875 4493 } else {
32082309 4494 if (!ftrace_pages)
a7900875 4495 goto out;
32082309 4496
a7900875
SR
4497 if (WARN_ON(ftrace_pages->next)) {
4498 /* Hmm, we have free pages? */
4499 while (ftrace_pages->next)
4500 ftrace_pages = ftrace_pages->next;
32082309 4501 }
a7900875 4502
706c81f8 4503 ftrace_pages->next = start_pg;
32082309
SR
4504 }
4505
68bf21aa 4506 p = start;
706c81f8 4507 pg = start_pg;
68bf21aa
SR
4508 while (p < end) {
4509 addr = ftrace_call_adjust(*p++);
20e5227e
SR
4510 /*
4511 * Some architecture linkers will pad between
4512 * the different mcount_loc sections of different
4513 * object files to satisfy alignments.
4514 * Skip any NULL pointers.
4515 */
4516 if (!addr)
4517 continue;
706c81f8
SR
4518
4519 if (pg->index == pg->size) {
4520 /* We should have allocated enough */
4521 if (WARN_ON(!pg->next))
4522 break;
4523 pg = pg->next;
4524 }
4525
4526 rec = &pg->records[pg->index++];
4527 rec->ip = addr;
68bf21aa
SR
4528 }
4529
706c81f8
SR
4530 /* We should have used all pages */
4531 WARN_ON(pg->next);
4532
4533 /* Assign the last page to ftrace_pages */
4534 ftrace_pages = pg;
4535
a4f18ed1 4536 /*
4376cac6
SR
4537 * We only need to disable interrupts on start up
4538 * because we are modifying code that an interrupt
4539 * may execute, and the modification is not atomic.
4540 * But for modules, nothing runs the code we modify
4541 * until we are finished with it, and there's no
4542 * reason to cause large interrupt latencies while we do it.
a4f18ed1 4543 */
4376cac6
SR
4544 if (!mod)
4545 local_irq_save(flags);
1dc43cf0 4546 ftrace_update_code(mod, start_pg);
4376cac6
SR
4547 if (!mod)
4548 local_irq_restore(flags);
a7900875
SR
4549 ret = 0;
4550 out:
e6ea44e9 4551 mutex_unlock(&ftrace_lock);
68bf21aa 4552
a7900875 4553 return ret;
68bf21aa
SR
4554}
4555
93eb677d 4556#ifdef CONFIG_MODULES
32082309
SR
4557
4558#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4559
e7247a15 4560void ftrace_release_mod(struct module *mod)
93eb677d
SR
4561{
4562 struct dyn_ftrace *rec;
32082309 4563 struct ftrace_page **last_pg;
93eb677d 4564 struct ftrace_page *pg;
a7900875 4565 int order;
93eb677d 4566
45a4a237
SR
4567 mutex_lock(&ftrace_lock);
4568
e7247a15 4569 if (ftrace_disabled)
45a4a237 4570 goto out_unlock;
93eb677d 4571
32082309
SR
4572 /*
4573 * Each module has its own ftrace_pages, remove
4574 * them from the list.
4575 */
4576 last_pg = &ftrace_pages_start;
4577 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4578 rec = &pg->records[0];
e7247a15 4579 if (within_module_core(rec->ip, mod)) {
93eb677d 4580 /*
32082309
SR
4581 * As core pages are first, the first
4582 * page should never be a module page.
93eb677d 4583 */
32082309
SR
4584 if (WARN_ON(pg == ftrace_pages_start))
4585 goto out_unlock;
4586
4587 /* Check if we are deleting the last page */
4588 if (pg == ftrace_pages)
4589 ftrace_pages = next_to_ftrace_page(last_pg);
4590
4591 *last_pg = pg->next;
a7900875
SR
4592 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4593 free_pages((unsigned long)pg->records, order);
4594 kfree(pg);
32082309
SR
4595 } else
4596 last_pg = &pg->next;
4597 }
45a4a237 4598 out_unlock:
93eb677d
SR
4599 mutex_unlock(&ftrace_lock);
4600}
4601
4602static void ftrace_init_module(struct module *mod,
4603 unsigned long *start, unsigned long *end)
90d595fe 4604{
00fd61ae 4605 if (ftrace_disabled || start == end)
fed1939c 4606 return;
5cb084bb 4607 ftrace_process_locs(mod, start, end);
90d595fe
SR
4608}
4609
a949ae56 4610void ftrace_module_init(struct module *mod)
93eb677d 4611{
a949ae56
SRRH
4612 ftrace_init_module(mod, mod->ftrace_callsites,
4613 mod->ftrace_callsites +
4614 mod->num_ftrace_callsites);
8c189ea6
SRRH
4615}
4616
4617static int ftrace_module_notify_exit(struct notifier_block *self,
4618 unsigned long val, void *data)
4619{
4620 struct module *mod = data;
4621
4622 if (val == MODULE_STATE_GOING)
e7247a15 4623 ftrace_release_mod(mod);
93eb677d
SR
4624
4625 return 0;
4626}
4627#else
8c189ea6
SRRH
4628static int ftrace_module_notify_exit(struct notifier_block *self,
4629 unsigned long val, void *data)
93eb677d
SR
4630{
4631 return 0;
4632}
4633#endif /* CONFIG_MODULES */
4634
8c189ea6
SRRH
4635struct notifier_block ftrace_module_exit_nb = {
4636 .notifier_call = ftrace_module_notify_exit,
4637 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4638};
4639
68bf21aa
SR
4640void __init ftrace_init(void)
4641{
1dc43cf0
JS
4642 extern unsigned long __start_mcount_loc[];
4643 extern unsigned long __stop_mcount_loc[];
3a36cb11 4644 unsigned long count, flags;
68bf21aa
SR
4645 int ret;
4646
68bf21aa 4647 local_irq_save(flags);
3a36cb11 4648 ret = ftrace_dyn_arch_init();
68bf21aa 4649 local_irq_restore(flags);
af64a7cb 4650 if (ret)
68bf21aa
SR
4651 goto failed;
4652
4653 count = __stop_mcount_loc - __start_mcount_loc;
c867ccd8
JS
4654 if (!count) {
4655 pr_info("ftrace: No functions to be traced?\n");
68bf21aa 4656 goto failed;
c867ccd8
JS
4657 }
4658
4659 pr_info("ftrace: allocating %ld entries in %ld pages\n",
4660 count, count / ENTRIES_PER_PAGE + 1);
68bf21aa
SR
4661
4662 last_ftrace_enabled = ftrace_enabled = 1;
4663
5cb084bb 4664 ret = ftrace_process_locs(NULL,
31e88909 4665 __start_mcount_loc,
68bf21aa
SR
4666 __stop_mcount_loc);
4667
8c189ea6 4668 ret = register_module_notifier(&ftrace_module_exit_nb);
24ed0c4b 4669 if (ret)
8c189ea6 4670 pr_warning("Failed to register ftrace module exit notifier\n");
93eb677d 4671
2af15d6a
SR
4672 set_ftrace_early_filters();
4673
68bf21aa
SR
4674 return;
4675 failed:
4676 ftrace_disabled = 1;
4677}
68bf21aa 4678
3d083395 4679#else
0b6e4d56 4680
2b499381 4681static struct ftrace_ops global_ops = {
bd69c30b 4682 .func = ftrace_stub,
f04f24fb 4683 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
bd69c30b
SR
4684};
4685
0b6e4d56
FW
4686static int __init ftrace_nodyn_init(void)
4687{
4688 ftrace_enabled = 1;
4689 return 0;
4690}
6f415672 4691core_initcall(ftrace_nodyn_init);
0b6e4d56 4692
df4fc315
SR
4693static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4694static inline void ftrace_startup_enable(int command) { }
e1effa01 4695static inline void ftrace_startup_all(int command) { }
5a45cfe1 4696/* Keep as macros so we do not need to define the commands */
8a56d776
SRRH
4697# define ftrace_startup(ops, command) \
4698 ({ \
4699 int ___ret = __register_ftrace_function(ops); \
4700 if (!___ret) \
4701 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4702 ___ret; \
3b6cfdb1 4703 })
1fcc1553
SRRH
4704# define ftrace_shutdown(ops, command) \
4705 ({ \
4706 int ___ret = __unregister_ftrace_function(ops); \
4707 if (!___ret) \
4708 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4709 ___ret; \
4710 })
8a56d776 4711
c7aafc54
IM
4712# define ftrace_startup_sysctl() do { } while (0)
4713# define ftrace_shutdown_sysctl() do { } while (0)
b848914c
SR
4714
4715static inline int
195a8afc 4716ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
b848914c
SR
4717{
4718 return 1;
4719}
4720
3d083395
SR
4721#endif /* CONFIG_DYNAMIC_FTRACE */
4722
4104d326
SRRH
4723__init void ftrace_init_global_array_ops(struct trace_array *tr)
4724{
4725 tr->ops = &global_ops;
4726 tr->ops->private = tr;
4727}
4728
4729void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4730{
4731 /* If we filter on pids, update to use the pid function */
4732 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4733 if (WARN_ON(tr->ops->func != ftrace_stub))
4734 printk("ftrace ops had %pS for function\n",
4735 tr->ops->func);
4736 /* Only the top level instance does pid tracing */
4737 if (!list_empty(&ftrace_pids)) {
4738 set_ftrace_pid_function(func);
4739 func = ftrace_pid_func;
4740 }
4741 }
4742 tr->ops->func = func;
4743 tr->ops->private = tr;
4744}
4745
4746void ftrace_reset_array_ops(struct trace_array *tr)
4747{
4748 tr->ops->func = ftrace_stub;
4749}
4750
e248491a 4751static void
2f5f6ad9 4752ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 4753 struct ftrace_ops *op, struct pt_regs *regs)
e248491a 4754{
e248491a
JO
4755 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4756 return;
4757
4758 /*
4759 * Some of the ops may be dynamically allocated,
4760 * they must be freed after a synchronize_sched().
4761 */
4762 preempt_disable_notrace();
4763 trace_recursion_set(TRACE_CONTROL_BIT);
b5aa3a47
SRRH
4764
4765 /*
4766 * Control funcs (perf) use RCU. Only trace if
4767 * RCU is currently active.
4768 */
4769 if (!rcu_is_watching())
4770 goto out;
4771
0a016409 4772 do_for_each_ftrace_op(op, ftrace_control_list) {
395b97a3
SRRH
4773 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4774 !ftrace_function_local_disabled(op) &&
195a8afc 4775 ftrace_ops_test(op, ip, regs))
a1e2e31d 4776 op->func(ip, parent_ip, op, regs);
0a016409 4777 } while_for_each_ftrace_op(op);
b5aa3a47 4778 out:
e248491a
JO
4779 trace_recursion_clear(TRACE_CONTROL_BIT);
4780 preempt_enable_notrace();
4781}
4782
4783static struct ftrace_ops control_ops = {
f04f24fb
MH
4784 .func = ftrace_ops_control_func,
4785 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
33b7f99c 4786 INIT_OPS_HASH(control_ops)
e248491a
JO
4787};
4788
2f5f6ad9
SR
4789static inline void
4790__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 4791 struct ftrace_ops *ignored, struct pt_regs *regs)
b848914c 4792{
cdbe61bf 4793 struct ftrace_ops *op;
edc15caf 4794 int bit;
b848914c 4795
edc15caf
SR
4796 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4797 if (bit < 0)
4798 return;
b1cff0ad 4799
cdbe61bf
SR
4800 /*
4801 * Some of the ops may be dynamically allocated,
4802 * they must be freed after a synchronize_sched().
4803 */
4804 preempt_disable_notrace();
0a016409 4805 do_for_each_ftrace_op(op, ftrace_ops_list) {
4104d326 4806 if (ftrace_ops_test(op, ip, regs)) {
1d48d596
SRRH
4807 if (FTRACE_WARN_ON(!op->func)) {
4808 pr_warn("op=%p %pS\n", op, op);
4104d326
SRRH
4809 goto out;
4810 }
a1e2e31d 4811 op->func(ip, parent_ip, op, regs);
4104d326 4812 }
0a016409 4813 } while_for_each_ftrace_op(op);
4104d326 4814out:
cdbe61bf 4815 preempt_enable_notrace();
edc15caf 4816 trace_clear_recursion(bit);
b848914c
SR
4817}
4818
2f5f6ad9
SR
4819/*
4820 * Some archs only support passing ip and parent_ip. Even though
4821 * the list function ignores the op parameter, we do not want any
4822 * C side effects, where a function is called without the caller
4823 * sending a third parameter.
a1e2e31d
SR
4824 * Archs are to support both the regs and ftrace_ops at the same time.
4825 * If they support ftrace_ops, it is assumed they support regs.
4826 * If callbacks want to use regs, they must either check for regs
06aeaaea
MH
4827 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4828 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
a1e2e31d
SR
4829 * An architecture can pass partial regs with ftrace_ops and still
4830 * set ARCH_SUPPORTS_FTRACE_OPS.
2f5f6ad9
SR
4831 */
4832#if ARCH_SUPPORTS_FTRACE_OPS
4833static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 4834 struct ftrace_ops *op, struct pt_regs *regs)
2f5f6ad9 4835{
a1e2e31d 4836 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
2f5f6ad9
SR
4837}
4838#else
4839static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4840{
a1e2e31d 4841 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2f5f6ad9
SR
4842}
4843#endif
4844
f1ff6348
SRRH
4845/*
4846 * If there's only one function registered but it does not support
4847 * recursion, this function will be called by the mcount trampoline.
4848 * This function will handle recursion protection.
4849 */
4850static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
4851 struct ftrace_ops *op, struct pt_regs *regs)
4852{
4853 int bit;
4854
4855 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4856 if (bit < 0)
4857 return;
4858
4859 op->func(ip, parent_ip, op, regs);
4860
4861 trace_clear_recursion(bit);
4862}
4863
87354059
SRRH
4864/**
4865 * ftrace_ops_get_func - get the function a trampoline should call
4866 * @ops: the ops to get the function for
4867 *
4868 * Normally the mcount trampoline will call the ops->func, but there
4869 * are times that it should not. For example, if the ops does not
4870 * have its own recursion protection, then it should call the
4871 * ftrace_ops_recurs_func() instead.
4872 *
4873 * Returns the function that the trampoline should call for @ops.
4874 */
4875ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
4876{
4877 /*
4878 * If this is a dynamic ops or we force list func,
4879 * then it needs to call the list anyway.
4880 */
4881 if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
4882 return ftrace_ops_list_func;
4883
4884 /*
4885 * If the func handles its own recursion, call it directly.
4886 * Otherwise call the recursion protected function that
4887 * will call the ftrace ops function.
4888 */
4889 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
4890 return ftrace_ops_recurs_func;
4891
4892 return ops->func;
4893}
4894
e32d8956 4895static void clear_ftrace_swapper(void)
978f3a45
SR
4896{
4897 struct task_struct *p;
e32d8956 4898 int cpu;
978f3a45 4899
e32d8956
SR
4900 get_online_cpus();
4901 for_each_online_cpu(cpu) {
4902 p = idle_task(cpu);
978f3a45 4903 clear_tsk_trace_trace(p);
e32d8956
SR
4904 }
4905 put_online_cpus();
4906}
978f3a45 4907
e32d8956
SR
4908static void set_ftrace_swapper(void)
4909{
4910 struct task_struct *p;
4911 int cpu;
4912
4913 get_online_cpus();
4914 for_each_online_cpu(cpu) {
4915 p = idle_task(cpu);
4916 set_tsk_trace_trace(p);
4917 }
4918 put_online_cpus();
978f3a45
SR
4919}
4920
e32d8956
SR
4921static void clear_ftrace_pid(struct pid *pid)
4922{
4923 struct task_struct *p;
4924
229c4ef8 4925 rcu_read_lock();
e32d8956
SR
4926 do_each_pid_task(pid, PIDTYPE_PID, p) {
4927 clear_tsk_trace_trace(p);
4928 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
4929 rcu_read_unlock();
4930
e32d8956
SR
4931 put_pid(pid);
4932}
4933
4934static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
4935{
4936 struct task_struct *p;
4937
229c4ef8 4938 rcu_read_lock();
978f3a45
SR
4939 do_each_pid_task(pid, PIDTYPE_PID, p) {
4940 set_tsk_trace_trace(p);
4941 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 4942 rcu_read_unlock();
978f3a45
SR
4943}
4944
756d17ee 4945static void clear_ftrace_pid_task(struct pid *pid)
e32d8956 4946{
756d17ee 4947 if (pid == ftrace_swapper_pid)
e32d8956
SR
4948 clear_ftrace_swapper();
4949 else
756d17ee 4950 clear_ftrace_pid(pid);
e32d8956
SR
4951}
4952
4953static void set_ftrace_pid_task(struct pid *pid)
4954{
4955 if (pid == ftrace_swapper_pid)
4956 set_ftrace_swapper();
4957 else
4958 set_ftrace_pid(pid);
4959}
4960
756d17ee 4961static int ftrace_pid_add(int p)
df4fc315 4962{
978f3a45 4963 struct pid *pid;
756d17ee 4964 struct ftrace_pid *fpid;
4965 int ret = -EINVAL;
df4fc315 4966
756d17ee 4967 mutex_lock(&ftrace_lock);
df4fc315 4968
756d17ee 4969 if (!p)
4970 pid = ftrace_swapper_pid;
4971 else
4972 pid = find_get_pid(p);
df4fc315 4973
756d17ee 4974 if (!pid)
4975 goto out;
df4fc315 4976
756d17ee 4977 ret = 0;
df4fc315 4978
756d17ee 4979 list_for_each_entry(fpid, &ftrace_pids, list)
4980 if (fpid->pid == pid)
4981 goto out_put;
978f3a45 4982
756d17ee 4983 ret = -ENOMEM;
df4fc315 4984
756d17ee 4985 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4986 if (!fpid)
4987 goto out_put;
df4fc315 4988
756d17ee 4989 list_add(&fpid->list, &ftrace_pids);
4990 fpid->pid = pid;
0ef8cde5 4991
756d17ee 4992 set_ftrace_pid_task(pid);
978f3a45 4993
756d17ee 4994 ftrace_update_pid_func();
e1effa01
SRRH
4995
4996 ftrace_startup_all(0);
756d17ee 4997
4998 mutex_unlock(&ftrace_lock);
4999 return 0;
5000
5001out_put:
5002 if (pid != ftrace_swapper_pid)
5003 put_pid(pid);
978f3a45 5004
756d17ee 5005out:
5006 mutex_unlock(&ftrace_lock);
5007 return ret;
5008}
5009
5010static void ftrace_pid_reset(void)
5011{
5012 struct ftrace_pid *fpid, *safe;
978f3a45 5013
756d17ee 5014 mutex_lock(&ftrace_lock);
5015 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5016 struct pid *pid = fpid->pid;
5017
5018 clear_ftrace_pid_task(pid);
5019
5020 list_del(&fpid->list);
5021 kfree(fpid);
df4fc315
SR
5022 }
5023
df4fc315 5024 ftrace_update_pid_func();
e1effa01 5025 ftrace_startup_all(0);
df4fc315 5026
e6ea44e9 5027 mutex_unlock(&ftrace_lock);
756d17ee 5028}
df4fc315 5029
756d17ee 5030static void *fpid_start(struct seq_file *m, loff_t *pos)
5031{
5032 mutex_lock(&ftrace_lock);
5033
5034 if (list_empty(&ftrace_pids) && (!*pos))
5035 return (void *) 1;
5036
5037 return seq_list_start(&ftrace_pids, *pos);
5038}
5039
5040static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5041{
5042 if (v == (void *)1)
5043 return NULL;
5044
5045 return seq_list_next(v, &ftrace_pids, pos);
5046}
5047
5048static void fpid_stop(struct seq_file *m, void *p)
5049{
5050 mutex_unlock(&ftrace_lock);
5051}
5052
5053static int fpid_show(struct seq_file *m, void *v)
5054{
5055 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5056
5057 if (v == (void *)1) {
5058 seq_printf(m, "no pid\n");
5059 return 0;
5060 }
5061
5062 if (fpid->pid == ftrace_swapper_pid)
5063 seq_printf(m, "swapper tasks\n");
5064 else
5065 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5066
5067 return 0;
5068}
5069
5070static const struct seq_operations ftrace_pid_sops = {
5071 .start = fpid_start,
5072 .next = fpid_next,
5073 .stop = fpid_stop,
5074 .show = fpid_show,
5075};
5076
5077static int
5078ftrace_pid_open(struct inode *inode, struct file *file)
5079{
5080 int ret = 0;
5081
5082 if ((file->f_mode & FMODE_WRITE) &&
5083 (file->f_flags & O_TRUNC))
5084 ftrace_pid_reset();
5085
5086 if (file->f_mode & FMODE_READ)
5087 ret = seq_open(file, &ftrace_pid_sops);
5088
5089 return ret;
5090}
5091
df4fc315
SR
5092static ssize_t
5093ftrace_pid_write(struct file *filp, const char __user *ubuf,
5094 size_t cnt, loff_t *ppos)
5095{
457dc928 5096 char buf[64], *tmp;
df4fc315
SR
5097 long val;
5098 int ret;
5099
5100 if (cnt >= sizeof(buf))
5101 return -EINVAL;
5102
5103 if (copy_from_user(&buf, ubuf, cnt))
5104 return -EFAULT;
5105
5106 buf[cnt] = 0;
5107
756d17ee 5108 /*
5109 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5110 * to clear the filter quietly.
5111 */
457dc928
IM
5112 tmp = strstrip(buf);
5113 if (strlen(tmp) == 0)
756d17ee 5114 return 1;
5115
bcd83ea6 5116 ret = kstrtol(tmp, 10, &val);
df4fc315
SR
5117 if (ret < 0)
5118 return ret;
5119
756d17ee 5120 ret = ftrace_pid_add(val);
df4fc315 5121
756d17ee 5122 return ret ? ret : cnt;
5123}
df4fc315 5124
756d17ee 5125static int
5126ftrace_pid_release(struct inode *inode, struct file *file)
5127{
5128 if (file->f_mode & FMODE_READ)
5129 seq_release(inode, file);
df4fc315 5130
756d17ee 5131 return 0;
df4fc315
SR
5132}
5133
5e2336a0 5134static const struct file_operations ftrace_pid_fops = {
756d17ee 5135 .open = ftrace_pid_open,
5136 .write = ftrace_pid_write,
5137 .read = seq_read,
098c879e 5138 .llseek = tracing_lseek,
756d17ee 5139 .release = ftrace_pid_release,
df4fc315
SR
5140};
5141
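/*
 * User-space view of set_ftrace_pid (illustrative):
 *
 *	echo 123  > set_ftrace_pid	# trace only pid 123
 *	echo 456 >> set_ftrace_pid	# ...and pid 456 as well
 *	echo      > set_ftrace_pid	# clear the pid filter
 *	echo 0    > set_ftrace_pid	# trace the per-cpu swapper tasks
 */
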
5142static __init int ftrace_init_debugfs(void)
5143{
5144 struct dentry *d_tracer;
df4fc315
SR
5145
5146 d_tracer = tracing_init_dentry();
5147 if (!d_tracer)
5148 return 0;
5149
5150 ftrace_init_dyn_debugfs(d_tracer);
5151
5452af66
FW
5152 trace_create_file("set_ftrace_pid", 0644, d_tracer,
5153 NULL, &ftrace_pid_fops);
493762fc
SR
5154
5155 ftrace_profile_debugfs(d_tracer);
5156
df4fc315
SR
5157 return 0;
5158}
df4fc315
SR
5159fs_initcall(ftrace_init_debugfs);
5160
a2bb6a3d 5161/**
81adbdc0 5162 * ftrace_kill - kill ftrace
a2bb6a3d
SR
5163 *
5164 * This function should be used by panic code. It stops ftrace
5165 * but in a not so nice way. If you simply need to disable ftrace
5166 * cleanly, unregister its users instead.
5167 */
81adbdc0 5168void ftrace_kill(void)
a2bb6a3d
SR
5169{
5170 ftrace_disabled = 1;
5171 ftrace_enabled = 0;
a2bb6a3d
SR
5172 clear_ftrace_function();
5173}
5174
e0a413f6
SR
5175/**
5176 * ftrace_is_dead - Test if ftrace is dead or not.
5177 */
5178int ftrace_is_dead(void)
5179{
5180 return ftrace_disabled;
5181}
5182
16444a8a 5183/**
3d083395
SR
5184 * register_ftrace_function - register a function for profiling
5185 * @ops - ops structure that holds the function for profiling.
16444a8a 5186 *
3d083395
SR
5187 * Register a callback function to be called from every traceable
5188 * function in the kernel.
5189 *
5190 * Note: @ops->func and all the functions it calls must be labeled
5191 * with "notrace", otherwise it will go into a
5192 * recursive loop.
16444a8a 5193 */
3d083395 5194int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 5195{
45a4a237 5196 int ret = -1;
4eebcc81 5197
f04f24fb
MH
5198 ftrace_ops_init(ops);
5199
e6ea44e9 5200 mutex_lock(&ftrace_lock);
e7d3737e 5201
8a56d776 5202 ret = ftrace_startup(ops, 0);
b848914c 5203
e6ea44e9 5204 mutex_unlock(&ftrace_lock);
8d240dd8 5205
b0fc494f 5206 return ret;
3d083395 5207}
cdbe61bf 5208EXPORT_SYMBOL_GPL(register_ftrace_function);
3d083395
SR
5209
5210/**
32632920 5211 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
5212 * @ops - ops structure that holds the function to unregister
5213 *
5214 * Unregister a function that was added to be called by ftrace profiling.
5215 */
5216int unregister_ftrace_function(struct ftrace_ops *ops)
5217{
5218 int ret;
5219
e6ea44e9 5220 mutex_lock(&ftrace_lock);
8a56d776 5221 ret = ftrace_shutdown(ops, 0);
e6ea44e9 5222 mutex_unlock(&ftrace_lock);
b0fc494f
SR
5223
5224 return ret;
5225}
cdbe61bf 5226EXPORT_SYMBOL_GPL(unregister_ftrace_function);
b0fc494f 5227
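/*
 * End-to-end sketch of the register/unregister pair above
 * (hypothetical module, minimal error handling): count entries
 * into "vfs_*" functions.
 */
static atomic_t vfs_hits = ATOMIC_INIT(0);

static void notrace
vfs_counter(unsigned long ip, unsigned long parent_ip,
	    struct ftrace_ops *op, struct pt_regs *regs)
{
	atomic_inc(&vfs_hits);	/* keep it notrace-safe: no locks here */
}

static struct ftrace_ops vfs_counter_ops = {
	.func	= vfs_counter,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init vfs_counter_init(void)
{
	unsigned char pat[] = "vfs_*";

	ftrace_set_filter(&vfs_counter_ops, pat, sizeof(pat) - 1, 1);
	return register_ftrace_function(&vfs_counter_ops);
}

static void __exit vfs_counter_exit(void)
{
	unregister_ftrace_function(&vfs_counter_ops);
	pr_info("vfs functions hit %d times\n", atomic_read(&vfs_hits));
}
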
e309b41d 5228int
b0fc494f 5229ftrace_enable_sysctl(struct ctl_table *table, int write,
8d65af78 5230 void __user *buffer, size_t *lenp,
b0fc494f
SR
5231 loff_t *ppos)
5232{
45a4a237 5233 int ret = -ENODEV;
4eebcc81 5234
e6ea44e9 5235 mutex_lock(&ftrace_lock);
b0fc494f 5236
45a4a237
SR
5237 if (unlikely(ftrace_disabled))
5238 goto out;
5239
5240 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 5241
a32c7765 5242 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
5243 goto out;
5244
a32c7765 5245 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f
SR
5246
5247 if (ftrace_enabled) {
5248
5249 ftrace_startup_sysctl();
5250
5251 /* we are starting ftrace again */
5000c418
JK
5252 if (ftrace_ops_list != &ftrace_list_end)
5253 update_ftrace_function();
b0fc494f
SR
5254
5255 } else {
5256 /* stopping ftrace calls (just send to ftrace_stub) */
5257 ftrace_trace_function = ftrace_stub;
5258
5259 ftrace_shutdown_sysctl();
5260 }
5261
5262 out:
e6ea44e9 5263 mutex_unlock(&ftrace_lock);
3d083395 5264 return ret;
16444a8a 5265}
f17845e5 5266
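/*
 * The handler above backs /proc/sys/kernel/ftrace_enabled; from user
 * space (illustrative):
 *
 *	sysctl kernel.ftrace_enabled=0	# stub out all function callbacks
 *	sysctl kernel.ftrace_enabled=1	# restore them
 */
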
fb52607a 5267#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 5268
5f151b24
SRRH
5269static struct ftrace_ops graph_ops = {
5270 .func = ftrace_stub,
5271 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5272 FTRACE_OPS_FL_INITIALIZED |
5273 FTRACE_OPS_FL_STUB,
5274#ifdef FTRACE_GRAPH_TRAMP_ADDR
5275 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5276#endif
5277 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5278};
5279
597af815 5280static int ftrace_graph_active;
e7d3737e 5281
e49dc19c
SR
5282int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5283{
5284 return 0;
5285}
5286
287b6e68
FW
5287/* The callbacks that hook a function */
5288trace_func_graph_ret_t ftrace_graph_return =
5289 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 5290trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
23a8e844 5291static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
5292
5293/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5294static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5295{
5296 int i;
5297 int ret = 0;
5298 unsigned long flags;
5299 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5300 struct task_struct *g, *t;
5301
5302 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5303 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5304 * sizeof(struct ftrace_ret_stack),
5305 GFP_KERNEL);
5306 if (!ret_stack_list[i]) {
5307 start = 0;
5308 end = i;
5309 ret = -ENOMEM;
5310 goto free;
5311 }
5312 }
5313
5314 read_lock_irqsave(&tasklist_lock, flags);
5315 do_each_thread(g, t) {
5316 if (start == end) {
5317 ret = -EAGAIN;
5318 goto unlock;
5319 }
5320
5321 if (t->ret_stack == NULL) {
380c4b14 5322 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 5323 atomic_set(&t->trace_overrun, 0);
26c01624
SR
5324 t->curr_ret_stack = -1;
5325 /* Make sure the tasks see the -1 first: */
5326 smp_wmb();
5327 t->ret_stack = ret_stack_list[start++];
f201ae23
FW
5328 }
5329 } while_each_thread(g, t);
5330
5331unlock:
5332 read_unlock_irqrestore(&tasklist_lock, flags);
5333free:
5334 for (i = start; i < end; i++)
5335 kfree(ret_stack_list[i]);
5336 return ret;
5337}
5338
8aef2d28 5339static void
38516ab5
SR
5340ftrace_graph_probe_sched_switch(void *ignore,
5341 struct task_struct *prev, struct task_struct *next)
8aef2d28
SR
5342{
5343 unsigned long long timestamp;
5344 int index;
5345
be6f164a
SR
5346 /*
5347 * Does the user want to count the time a function was asleep?
5348 * If so, do not update the time stamps.
5349 */
5350 if (trace_flags & TRACE_ITER_SLEEP_TIME)
5351 return;
5352
8aef2d28
SR
5353 timestamp = trace_clock_local();
5354
5355 prev->ftrace_timestamp = timestamp;
5356
5357 /* only process tasks that we timestamped */
5358 if (!next->ftrace_timestamp)
5359 return;
5360
5361 /*
5362 * Update all the counters in next to make up for the
5363 * time next was sleeping.
5364 */
5365 timestamp -= next->ftrace_timestamp;
5366
5367 for (index = next->curr_ret_stack; index >= 0; index--)
5368 next->ret_stack[index].calltime += timestamp;
5369}
5370
f201ae23 5371/* Allocate a return stack for each task */
fb52607a 5372static int start_graph_tracing(void)
f201ae23
FW
5373{
5374 struct ftrace_ret_stack **ret_stack_list;
5b058bcd 5375 int ret, cpu;
f201ae23
FW
5376
5377 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5378 sizeof(struct ftrace_ret_stack *),
5379 GFP_KERNEL);
5380
5381 if (!ret_stack_list)
5382 return -ENOMEM;
5383
5b058bcd 5384 /* The cpu_boot init_task->ret_stack will never be freed */
179c498a
SR
5385 for_each_online_cpu(cpu) {
5386 if (!idle_task(cpu)->ret_stack)
868baf07 5387 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
179c498a 5388 }
5b058bcd 5389
f201ae23
FW
5390 do {
5391 ret = alloc_retstack_tasklist(ret_stack_list);
5392 } while (ret == -EAGAIN);
5393
8aef2d28 5394 if (!ret) {
38516ab5 5395 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
8aef2d28
SR
5396 if (ret)
5397 pr_info("ftrace_graph: Couldn't activate tracepoint"
5398 " probe to kernel_sched_switch\n");
5399 }
5400
f201ae23
FW
5401 kfree(ret_stack_list);
5402 return ret;
5403}
5404
4a2b8dda
FW
5405/*
5406 * Hibernation protection.
5407 * The state of the current task is too unstable during
5408 * suspend/restore to disk. We want to protect against that.
5409 */
5410static int
5411ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5412 void *unused)
5413{
5414 switch (state) {
5415 case PM_HIBERNATION_PREPARE:
5416 pause_graph_tracing();
5417 break;
5418
5419 case PM_POST_HIBERNATION:
5420 unpause_graph_tracing();
5421 break;
5422 }
5423 return NOTIFY_DONE;
5424}
5425
23a8e844
SRRH
5426static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5427{
5428 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5429 return 0;
5430 return __ftrace_graph_entry(trace);
5431}
5432
5433/*
5434 * The function graph tracer should only trace the functions defined
5435 * by set_ftrace_filter and set_ftrace_notrace. If another function
5436 * tracer ops is registered, the graph tracer must test each
5437 * function against the global ops, rather than trace any function
5438 * that some other ftrace_ops has registered.
5439 */
5440static void update_function_graph_func(void)
5441{
5f151b24
SRRH
5442 struct ftrace_ops *op;
5443 bool do_test = false;
5444
5445 /*
5446 * The graph and global ops share the same set of functions
5447 * to test. If any other ops is on the list, then
5448 * the graph tracing needs to test if it's the function
5449 * it should call.
5450 */
5451 do_for_each_ftrace_op(op, ftrace_ops_list) {
5452 if (op != &global_ops && op != &graph_ops &&
5453 op != &ftrace_list_end) {
5454 do_test = true;
5455 /* in double loop, break out with goto */
5456 goto out;
5457 }
5458 } while_for_each_ftrace_op(op);
5459 out:
5460 if (do_test)
23a8e844 5461 ftrace_graph_entry = ftrace_graph_entry_test;
5f151b24
SRRH
5462 else
5463 ftrace_graph_entry = __ftrace_graph_entry;
23a8e844
SRRH
5464}
5465
8275f69f
MK
5466static struct notifier_block ftrace_suspend_notifier = {
5467 .notifier_call = ftrace_suspend_notifier_call,
5468};
5469
287b6e68
FW
5470int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5471 trace_func_graph_ent_t entryfunc)
15e6cb36 5472{
e7d3737e
FW
5473 int ret = 0;
5474
e6ea44e9 5475 mutex_lock(&ftrace_lock);
e7d3737e 5476
05ce5818 5477 /* we currently allow only one tracer registered at a time */
597af815 5478 if (ftrace_graph_active) {
05ce5818
SR
5479 ret = -EBUSY;
5480 goto out;
5481 }
5482
4a2b8dda
FW
5483 register_pm_notifier(&ftrace_suspend_notifier);
5484
597af815 5485 ftrace_graph_active++;
fb52607a 5486 ret = start_graph_tracing();
f201ae23 5487 if (ret) {
597af815 5488 ftrace_graph_active--;
f201ae23
FW
5489 goto out;
5490 }
e53a6319 5491
287b6e68 5492 ftrace_graph_return = retfunc;
23a8e844
SRRH
5493
5494 /*
5495 * Update the indirect function to the entryfunc, and the
5496 * function that gets called to the entry_test first. Then
5497 * call the update fgraph entry function to determine if
5498 * the entryfunc should be called directly or not.
5499 */
5500 __ftrace_graph_entry = entryfunc;
5501 ftrace_graph_entry = ftrace_graph_entry_test;
5502 update_function_graph_func();
e53a6319 5503
5f151b24 5504 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
e7d3737e
FW
5505
5506out:
e6ea44e9 5507 mutex_unlock(&ftrace_lock);
e7d3737e 5508 return ret;
15e6cb36
FW
5509}
5510
fb52607a 5511void unregister_ftrace_graph(void)
15e6cb36 5512{
e6ea44e9 5513 mutex_lock(&ftrace_lock);
e7d3737e 5514
597af815 5515 if (unlikely(!ftrace_graph_active))
2aad1b76
SR
5516 goto out;
5517
597af815 5518 ftrace_graph_active--;
287b6e68 5519 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 5520 ftrace_graph_entry = ftrace_graph_entry_stub;
23a8e844 5521 __ftrace_graph_entry = ftrace_graph_entry_stub;
5f151b24 5522 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
4a2b8dda 5523 unregister_pm_notifier(&ftrace_suspend_notifier);
38516ab5 5524 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
e7d3737e 5525
2aad1b76 5526 out:
e6ea44e9 5527 mutex_unlock(&ftrace_lock);
15e6cb36 5528}
f201ae23 5529
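/*
 * Pairing sketch for register/unregister_ftrace_graph above
 * (hypothetical callbacks).  The entry handler may veto a call
 * tree by returning 0.
 */
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* graph this call */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the raw duration */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}
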
868baf07
SR
5530static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5531
5532static void
5533graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5534{
5535 atomic_set(&t->tracing_graph_pause, 0);
5536 atomic_set(&t->trace_overrun, 0);
5537 t->ftrace_timestamp = 0;
25985edc 5538 /* make curr_ret_stack visible before we add the ret_stack */
868baf07
SR
5539 smp_wmb();
5540 t->ret_stack = ret_stack;
5541}
5542
5543/*
5544 * Allocate a return stack for the idle task. May be the first
5545 * time through, or it may be done by CPU hotplug online.
5546 */
5547void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5548{
5549 t->curr_ret_stack = -1;
5550 /*
5551 * The idle task has no parent, it either has its own
5552 * stack or no stack at all.
5553 */
5554 if (t->ret_stack)
5555 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5556
5557 if (ftrace_graph_active) {
5558 struct ftrace_ret_stack *ret_stack;
5559
5560 ret_stack = per_cpu(idle_ret_stack, cpu);
5561 if (!ret_stack) {
5562 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5563 * sizeof(struct ftrace_ret_stack),
5564 GFP_KERNEL);
5565 if (!ret_stack)
5566 return;
5567 per_cpu(idle_ret_stack, cpu) = ret_stack;
5568 }
5569 graph_init_task(t, ret_stack);
5570 }
5571}
5572
f201ae23 5573/* Allocate a return stack for newly created task */
fb52607a 5574void ftrace_graph_init_task(struct task_struct *t)
f201ae23 5575{
84047e36
SR
5576 /* Make sure we do not use the parent ret_stack */
5577 t->ret_stack = NULL;
ea14eb71 5578 t->curr_ret_stack = -1;
84047e36 5579
597af815 5580 if (ftrace_graph_active) {
82310a32
SR
5581 struct ftrace_ret_stack *ret_stack;
5582
5583 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
f201ae23
FW
5584 * sizeof(struct ftrace_ret_stack),
5585 GFP_KERNEL);
82310a32 5586 if (!ret_stack)
f201ae23 5587 return;
868baf07 5588 graph_init_task(t, ret_stack);
84047e36 5589 }
f201ae23
FW
5590}
5591
fb52607a 5592void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 5593{
eae849ca
FW
5594 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5595
f201ae23 5596 t->ret_stack = NULL;
eae849ca
FW
5597 /* NULL must become visible to IRQs before we free it: */
5598 barrier();
5599
5600 kfree(ret_stack);
f201ae23 5601}
15e6cb36 5602#endif