/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * PER_CPU - set manually by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but, the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (first use time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
	FTRACE_OPS_FL_PID			= 1 << 14,
	FTRACE_OPS_FL_RCU			= 1 << 15,
	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 16,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
#else
static inline void ftrace_free_init_mem(void) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

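/*
 * Illustrative sketch (not part of this header): a minimal callback
 * registered through the API above. The names my_callback and my_ops
 * are hypothetical; the callback signature is the ftrace_func_t
 * typedef declared earlier.
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		// runs at every traced function entry; keep it fast
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	// from some init code:
 *	//	ret = register_ftrace_function(&my_ops);
 */
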
/**
 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
	return *this_cpu_ptr(ops->disabled);
}

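/*
 * Illustrative sketch (not part of this header): the PER_CPU helpers
 * above must run with preemption disabled; my_ops is a hypothetical
 * ftrace_ops registered with FTRACE_OPS_FL_PER_CPU.
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_ops);
 *	// ... section that must not be traced on this CPU ...
 *	ftrace_function_local_enable(&my_ops);
 *	preempt_enable();
 */
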
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

struct stack_trace;

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

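/*
 * Illustrative sketch (not part of this header): the intended pairing
 * of the helpers above inside a preemption-disabled region.
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	// ... code whose stack usage must not be traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */
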
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN bit is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
};

#define FTRACE_REF_MAX_SHIFT	25
#define FTRACE_FL_BITS		7
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

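/*
 * Illustrative reading of the split described above: for a record with
 * flags == (FTRACE_FL_ENABLED | FTRACE_FL_REGS | 2), two callbacks
 * reference the function, so ftrace_rec_count(rec) == 2, while the
 * upper mask bits say the function is enabled and wants regs saved.
 */
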
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

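/*
 * Illustrative sketch (not part of this header): restricting a
 * hypothetical my_ops to one function before registering it. The
 * pattern "schedule" is only an example; filters should be in place
 * before register_ftrace_function() so tracing starts filtered.
 *
 *	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */
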
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Change the call site to a different trampoline
 *                     (such as to start or stop saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))


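/*
 * Illustrative sketch (not part of this header): walking every mcount
 * record with the iterator above, as arch code updating call sites
 * might do.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect rec->ip, rec->flags, ftrace_rec_count(rec) ...
 *	}
 */
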
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

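/*
 * Illustrative sketch (not part of this header) of the careful
 * read/compare/write pattern described above. The helpers
 * ftrace_expected_code(), ftrace_nop_code() and patch_text() are
 * hypothetical stand-ins for arch-specific routines.
 *
 *	int ftrace_make_nop(struct module *mod,
 *			    struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned long insn;
 *
 *		if (probe_kernel_read(&insn, (void *)rec->ip, sizeof(insn)))
 *			return -EFAULT;
 *		if (insn != ftrace_expected_code(rec->ip, addr))
 *			return -EINVAL;
 *		if (patch_text((void *)rec->ip, ftrace_nop_code()))
 *			return -EPERM;
 *		return 0;
 *	}
 */
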
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

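/*
 * Illustrative sketch (not part of this header): the save/restore
 * pair above is meant to bracket a region, with the caller providing
 * its own serialization.
 *
 *	int saved = __ftrace_enabled_save();
 *	// ... code that must run with ftrace disabled ...
 *	__ftrace_enabled_restore(saved);
 */
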
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

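/*
 * Illustrative sketch (not part of this header): CALLER_ADDR0 is the
 * current function's own return address, CALLER_ADDR1 its caller's,
 * and so on, e.g. for debug output:
 *
 *	pr_debug("entered from %pS\n", (void *)CALLER_ADDR0);
 */
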
static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH	.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we want it to keep being traced by the usual
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph	notrace

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);

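/*
 * Illustrative sketch (not part of this header): hooking both ends of
 * a function with the graph API above. my_graph_entry and
 * my_graph_return are hypothetical.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *ent)
 *	{
 *		return 1;	// nonzero: trace this function
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *ret)
 *	{
 *		// ret->rettime - ret->calltime is the duration
 *	}
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 */
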
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION	.trace_recursion = 0,
#endif

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */