// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race with module loading
	 * and live kernel patching changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

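/*
 * For reference: on x86 every fentry call site is MCOUNT_INSN_SIZE (5)
 * bytes.  text_gen_insn() emits the 5-byte near call "e8 <rel32>", with
 * rel32 = addr - (ip + CALL_INSN_SIZE); the displacement is taken
 * relative to the instruction that follows the call.
 */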
static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read the code with copy_from_kernel_nofault(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
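	/*
	 * During early boot (ftrace_poke_late == 0) no other CPUs are
	 * executing yet, so text_poke_early() can simply write the new
	 * instruction in place.  Once late patching is in effect (between
	 * the prepare and post-process hooks above), the update must go
	 * through the INT3-based text_poke machinery: queue it here and
	 * let ftrace_arch_code_modify_post_process() flush the queue with
	 * text_poke_finish().
	 */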
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when a module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 * This function is only reached from __ftrace_replace_code(), which is
 * called by ftrace_replace_code() (which x86 overrides) and by
 * ftrace_update_code().  Those paths only turn mcount sites into nops or
 * nops into function calls; they never convert a function from the
 * non-regs variant to the regs-saving one, which is what
 * ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

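/*
 * ftrace_call and ftrace_regs_call are the patch sites inside the default
 * assembly trampolines (ftrace_caller/ftrace_regs_caller).  Updating the
 * global tracer function therefore means live-patching those two call
 * instructions; text_poke_bp() does this safely on a running system by
 * temporarily installing an INT3 breakpoint at the site while the bytes
 * are rewritten.
 */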
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

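/*
 * Batch update of all patch sites: the first pass verifies that every
 * site still contains the expected old instruction (bailing out via
 * ftrace_bug() on any mismatch), and only then does the second pass
 * queue the new instructions through text_poke_queue(), flushed at the
 * end by text_poke_finish().
 */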
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

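/*
 * Worked example of the %rip-relative encoding above: for the 7-byte
 * instruction "48 8b 15 <disp32>", the CPU computes the load address as
 * the address of the *next* instruction plus the signed disp32.  So to
 * make the trampoline's copy load the ops pointer stashed at "ptr",
 * create_trampoline() below sets
 *
 *	offset = ptr - (insn_address + OP_REF_SIZE);
 */
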
#define RET_SIZE	(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

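/*
 * Layout of an allocated trampoline (sketch):
 *
 *	trampoline + 0			copy of ftrace_(regs_)caller ..
 *					ftrace_(regs_)caller_end
 *	trampoline + size		return: a plain ret (plus int3 with
 *					CONFIG_SLS), or a jmp to
 *					__x86_return_thunk with rethunks
 *	trampoline + size + RET_SIZE	pointer to this ftrace_ops, loaded
 *					by the patched %rip-relative movq
 */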
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough space to store the ftrace_caller code, the
	 * return instruction, as well as the address of the ftrace_ops
	 * this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

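	/* Decode the rel32 call: target = address of the next insn + disp. */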
	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * The function graph tracer is the only case we know of
		 * that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

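/*
 * ftrace_graph_call is a 5-byte patch site inside the default trampoline:
 * enabling the graph tracer rewrites it into "jmp ftrace_graph_caller",
 * and disabling rewrites it back into "jmp ftrace_stub", both done live
 * via text_poke_bp().
 */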
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even though it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
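	/*
	 * Label 1 loads the original return address from the parent slot,
	 * label 2 overwrites that slot with return_hooker.  If either
	 * access faults, the exception table entries redirect to label 4,
	 * which sets "faulted" and resumes at label 3.
	 */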
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */