arch/mips/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
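
/*
 * Worked example (the address is hypothetical, chosen only for
 * illustration): for a target such as 0x80123458,
 *	INSN_JAL(0x80123458) = 0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff)
 *			     = 0x0c048d16
 * Only the low 28 bits of the target survive the encoding; the CPU takes
 * the upper 4 bits from the address of the delay slot, so the call site
 * and the target must live in the same 256MB segment.
 */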

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}
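
/*
 * For illustration only (the address is hypothetical): if _mcount sat at
 * 0x80100000, UASM_i_LA_mostly() above would assemble
 *	lui v1, 0x8010  ->  0x3c038010
 * into insn_lui_v1_hi16_mcount: opcode 0x0f in bits 31..26, rt = $3 (v1)
 * in bits 20..16, and the high half of the address in the immediate.
 */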

/*
 * Check if the address is in kernel space
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

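	/*
	 * Flush an 8-byte range: the patched word plus the one after it,
	 * which covers the branch delay slot.
	 */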
	flush_icache_range(ip, ip + 8);

	return 0;
}

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 *	move at, ra
 *	jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 *	lui v1, hi_16bit_of_mcount	--> b 1f (0x10000005)
 *	addiu v1, v1, low_16bit_of_mcount
 *	move at, ra
 *	move $12, ra_address
 *	jalr v1
 *	 sub sp, sp, 8
 *					1: offset = 5 instructions
 * 2.2 For the other situations
 *
 *	lui v1, hi_16bit_of_mcount	--> b 1f (0x10000004)
 *	addiu v1, v1, low_16bit_of_mcount
 *	move at, ra
 *	jalr v1
 *	 nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
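
/*
 * Decoding note: 0x10000000 is "beq $0, $0, offset", i.e. an
 * unconditional branch, and the low 16 bits hold the branch offset
 * counted in instructions from the delay slot. 0x10000004 therefore
 * lands on the "1:" label 4 instructions past the delay slot of the
 * module sequence shown above (5 when $12 is also set up).
 */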

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise, a
	 * long call is.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

	return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}
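
/*
 * Note the symmetry with ftrace_make_nop() above: for a module only the
 * first word of the long-call sequence is ever patched (lui <-> b 1f),
 * so enabling tracing just restores that one instruction and the
 * original sequence runs again.
 */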

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
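
/*
 * Worked example: "sw ra, 28(sp)" assembles to 0xafbf001c, so
 * (code & S_RA_SP) == S_RA_SP matches it and (code & OFFSET_MASK)
 * recovers the stack offset 0x1c. S_R_SP is looser: it matches a store
 * of any register $16..$31 to the stack, which is how the prologue scan
 * below recognises it is still inside the register-save sequence.
 */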

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back from the return address to just after
	 * the instruction "lui v1, hi_16bit_of_mcount" (an offset of 24);
	 * for the kernel, move back to just after "move at, ra" (an offset
	 * of 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search the text backwards until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move backwards to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}
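
/*
 * Illustration (the offsets are hypothetical): for a non-leaf kernel
 * function whose code begins
 *	addiu	sp, sp, -32
 *	sw	ra, 28(sp)
 *	move	at, ra
 *	jal	_mcount
 * the scan starts just before "move at, ra", immediately matches
 * "sw ra, 28(sp)" (0xafbf001c), and returns fp + 0x1c once the word
 * stored there is confirmed to be old_parent_ra.
 */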

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly. A non-leaf function saves the return address
	 * in its own stack space, so we cannot hijack it directly but must
	 * find the real stack address, which is exactly what
	 * ftrace_get_parent_ra_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * in $12 for us, and for a leaf function only a zero is put into
	 * $12. This is done in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
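	/*
	 * For reference: the call-site layouts in the comment above
	 * ftrace_make_nop() give these distances. In kernel space the
	 * site is the two-instruction "move at, ra; jal _mcount" pair;
	 * in a module the patched long-call sequence is
	 * MCOUNT_OFFSET_INSNS + 1 instructions, so trace.func backs up
	 * by that many instructions.
	 */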

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */