#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kallsyms.h>

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *filp, void __user *buffer, size_t *lenp,
                     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
        ftrace_func_t func;
        struct ftrace_ops *next;
};

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables,
 * so use them sparingly.  Never free an ftrace_ops or modify its
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
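
/*
 * Example: a minimal sketch of how a caller might hook the function
 * tracer.  The callback and ops names below (my_trace_func, my_ops)
 * are hypothetical and shown for illustration only; the ftrace_ops
 * must be statically allocated, as noted above.  In the callback, ip
 * is the address of the traced function and parent_ip is its caller.
 *
 *      static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *      {
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */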

extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE

enum {
        FTRACE_FL_FREE = (1 << 0),
        FTRACE_FL_FAILED = (1 << 1),
        FTRACE_FL_FILTER = (1 << 2),
        FTRACE_FL_ENABLED = (1 << 3),
        FTRACE_FL_NOTRACE = (1 << 4),
        FTRACE_FL_CONVERTED = (1 << 5),
        FTRACE_FL_FROZEN = (1 << 6),
};

struct dyn_ftrace {
        struct list_head list;
        unsigned long ip; /* address of mcount call-site */
        unsigned long flags;
};

int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern unsigned char *ftrace_nop_replace(void);
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

/**
 * ftrace_modify_code - modify code segment
 * @ip: the address of the code segment
 * @old_code: the contents of what is expected to be there
 * @new_code: the code to patch in
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                              unsigned char *new_code);
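
/*
 * A possible arch-side shape for ftrace_modify_code(), following the
 * read/compare/write contract above.  This is only a sketch: the
 * instruction size macro (MCOUNT_INSN_SIZE) is arch-specific, and
 * probe_kernel_read()/probe_kernel_write() are assumed to be available
 * on the architecture.
 *
 *      int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 *                             unsigned char *new_code)
 *      {
 *              unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *              if (probe_kernel_read(cur, (void *)ip, MCOUNT_INSN_SIZE))
 *                      return -EFAULT;
 *              if (memcmp(cur, old_code, MCOUNT_INSN_SIZE) != 0)
 *                      return -EINVAL;
 *              if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
 *                      return -EPERM;
 *              return 0;
 *      }
 */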

extern int skip_trace(unsigned long ip);

extern void ftrace_release(void *start, unsigned long size);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);

#else
# define skip_trace(ip) ({ 0; })
# define ftrace_force_update() ({ 0; })
# define ftrace_set_filter(buf, len, reset) do { } while (0)
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot be re-enabled after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
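
/*
 * Example: a minimal sketch of pairing the save/restore helpers around
 * a region that must not be traced.  The surrounding code, and whatever
 * lock or context keeps ftrace_enabled stable in between (as the comment
 * above requires), is hypothetical.
 *
 *      int saved = __ftrace_enabled_save();
 *      ... code that must run with function tracing disabled ...
 *      __ftrace_enabled_restore(saved);
 */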

#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
#endif

#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
# define time_hardirqs_on(a0, a1) do { } while (0)
# define time_hardirqs_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
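
/*
 * Example: a sketch of how a hook might report the addresses these
 * tracers expect.  The two arguments are typically the current return
 * address and its caller, taken from the CALLER_ADDRn helpers above;
 * the surrounding function (my_preempt_disable_hook) is hypothetical.
 *
 *      static void my_preempt_disable_hook(void)
 *      {
 *              trace_preempt_off(CALLER_ADDR0, CALLER_ADDR1);
 *      }
 */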

#ifdef CONFIG_TRACING
extern int ftrace_dump_on_oops;

extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);

/**
 * ftrace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Note: __ftrace_printk is an internal function for ftrace_printk and
 * the @ip is passed in via the ftrace_printk macro.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for.  By scattering printk-like
 * tracing through the code, a developer can quickly see
 * where problems are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving ftrace_printks scattered around in
 * your code.
 */
# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
extern int
__ftrace_printk(unsigned long ip, const char *fmt, ...)
        __attribute__ ((format (printf, 2, 3)));
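
/*
 * Example: a minimal sketch of dropping a trace marker into a fast
 * path.  The message and variable shown here (nr_pending) are
 * hypothetical; the output goes into the ftrace ring buffer rather
 * than to the console.
 *
 *      ftrace_printk("queue drained, %d requests pending\n", nr_pending);
 */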
extern void ftrace_dump(void);
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));

static inline int
ftrace_printk(const char *fmt, ...)
{
        return 0;
}
static inline void ftrace_dump(void) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
static inline void
ftrace_init_module(unsigned long *start, unsigned long *end) { }
#endif


/*
 * Structure which defines the trace of an initcall.
 * You don't have to fill the func field since it is
 * only used internally by the tracer.
 */
struct boot_trace {
        pid_t                   caller;
        char                    func[KSYM_NAME_LEN];
        int                     result;
        unsigned long long      duration;       /* usecs */
        ktime_t                 calltime;
        ktime_t                 rettime;
};

#ifdef CONFIG_BOOT_TRACER
/* Append the trace to the ring buffer */
extern void trace_boot(struct boot_trace *it, initcall_t fn);

/* Tells the tracer that smp_pre_initcall has finished,
 * so we can start tracing.
 */
extern void start_boot_trace(void);

/* Resume the tracing of other necessary events
 * such as sched switches.
 */
extern void enable_boot_trace(void);

/* Suspend this tracing.  Actually, only sched_switch tracing has
 * to be suspended; initcalls don't need it.
 */
extern void disable_boot_trace(void);
#else
static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
static inline void start_boot_trace(void) { }
static inline void enable_boot_trace(void) { }
static inline void disable_boot_trace(void) { }
#endif
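
/*
 * Example: a rough sketch of how an initcall harness might fill
 * struct boot_trace and hand it to the tracer.  The local names
 * (it, fn) and the exact timestamping are illustrative assumptions;
 * only the fields that struct boot_trace exposes are used.
 *
 *      struct boot_trace it;
 *
 *      it.caller = task_pid_nr(current);
 *      it.calltime = ktime_get();
 *      it.result = fn();
 *      it.rettime = ktime_get();
 *      it.duration = ktime_to_ns(ktime_sub(it.rettime, it.calltime)) / 1000;
 *      trace_boot(&it, fn);
 */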


#endif /* _LINUX_FTRACE_H */