kernel/trace/ftrace_internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
#define _LINUX_KERNEL_FTRACE_INTERNAL_H

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)				\
	op = rcu_dereference_raw_check(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

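/*
 * Illustrative sketch only (not part of the original header): the two macros
 * above are meant to be used as a pair bracketing a do-while loop over
 * ftrace_ops_list, roughly the way the list-walking code in
 * kernel/trace/ftrace.c uses them. The loop body and argument names below are
 * simplified for illustration:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (ftrace_ops_test(op, ip, regs))
 *			op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */
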
extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;
extern struct mutex ftrace_lock;
extern struct ftrace_ops global_ops;

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_startup(struct ftrace_ops *ops, int command);
int ftrace_shutdown(struct ftrace_ops *ops, int command);
int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);

#else /* !CONFIG_DYNAMIC_FTRACE */

int __register_ftrace_function(struct ftrace_ops *ops);
int __unregister_ftrace_function(struct ftrace_ops *ops);
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
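
/*
 * Illustrative sketch only (an assumption, not taken from this header):
 * whichever variant of ftrace_startup()/ftrace_shutdown() is built, callers
 * such as register_ftrace_function() are expected to invoke the pair while
 * holding ftrace_lock, e.g.:
 *
 *	mutex_lock(&ftrace_lock);
 *	ret = ftrace_startup(ops, 0);
 *	mutex_unlock(&ftrace_lock);
 */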

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern int ftrace_graph_active;
void update_function_graph_func(void);
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
# define ftrace_graph_active 0
static inline void update_function_graph_func(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#else /* !CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#endif