/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
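
/*
 * Illustrative example (values made up, not from the original source):
 * on x86 a near call is the opcode 0xe8 followed by a 32-bit offset
 * relative to the *next* instruction. With MCOUNT_INSN_SIZE == 5, a
 * call site at ip = 0x1000 redirected to addr = 0x2000 gets the offset
 * 0x2000 - 0x1005 = 0xffb, so the five bytes produced above would be:
 *
 *	e8 fb 0f 00 00
 */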

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being
 * written are the same as what already exists.
 */

static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static int mod_code_write;		/* set when NMI should do the write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU process can be writing to mod_code_status.
	 * (and the code itself)
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}
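
/*
 * Illustrative interleaving (added for clarity, not part of the original
 * source): if an NMI arrives after mod_code_write is set but before the
 * writer's own ftrace_mod_code() call above, ftrace_nmi_enter() sees the
 * flag and performs the write itself from NMI context. The writer's
 * later store then rewrites the same bytes, which is harmless; both
 * paths leave the same value in mod_code_status.
 */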

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
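
/*
 * Note (added for clarity, not from the original source): ftrace_call is
 * a label on the call instruction inside the mcount trampoline in the
 * arch entry assembly; patching the call at that single site switches
 * which tracer callback every traced function ends up invoking.
 */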

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * go to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}
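
/*
 * Worked example with made-up addresses (not from the original source):
 * suppose the 5-byte jmp at ftrace_graph_call sits at ip = 0x1000,
 * ftrace_stub is at 0x1100 and ftrace_graph_caller at 0x1200. Enabling
 * then expects old_offset = 0x1100 - 0x1005 = 0xfb and patches in
 * new_offset = 0x1200 - 0x1005 = 0x1fb, turning the bytes
 * "e9 fb 00 00 00" into "e9 fb 01 00 00".
 */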

#else /* !CONFIG_DYNAMIC_FTRACE */

/*
 * These functions are picked from those used on
 * this page for dynamic ftrace. They have been
 * simplified to ignore all traces in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
	atomic_dec(&in_nmi);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	/* Make sure the slot is reserved before it is filled in */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;
	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	return ret;
}

/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/* NMIs are currently unsupported */
	if (atomic_read(&in_nmi))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
		"   movl $0, %[faulted]\n"

		".section .fixup, \"ax\"\n"
		"3: movl $1, %[faulted]\n"
		".previous\n"

		_ASM_EXTABLE(1b, 3b)
		_ASM_EXTABLE(2b, 3b)

		: [parent_replaced] "=r" (parent), [old] "=r" (old),
		  [faulted] "=r" (faulted)
		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime,
			      self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;
	ftrace_graph_entry(&trace);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */