]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
35de5b06 AL |
2 | #ifndef _ASM_X86_TEXT_PATCHING_H |
3 | #define _ASM_X86_TEXT_PATCHING_H | |
4 | ||
5 | #include <linux/types.h> | |
6 | #include <linux/stddef.h> | |
7 | #include <asm/ptrace.h> | |
8 | ||
9 | struct paravirt_patch_site; | |
10 | #ifdef CONFIG_PARAVIRT | |
11 | void apply_paravirt(struct paravirt_patch_site *start, | |
12 | struct paravirt_patch_site *end); | |
13 | #else | |
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{
	/* No paravirt patch sites exist when CONFIG_PARAVIRT is disabled. */
}
17 | #define __parainstructions NULL | |
18 | #define __parainstructions_end NULL | |
19 | #endif | |
20 | ||
21 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | |
22 | ||
23 | /* | |
24 | * Clear and restore the kernel write-protection flag on the local CPU. | |
25 | * Allows the kernel to edit read-only pages. | |
26 | * Side-effect: any interrupt handler running between save and restore will have | |
27 | * the ability to write to read-only pages. | |
28 | * | |
29 | * Warning: | |
30 | * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and | |
31 | * no thread can be preempted in the instructions being modified (no iret to an | |
32 | * invalid instruction possible) or if the instructions are changed from a | |
33 | * consistent state to another consistent state atomically. | |
 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
35 | * inconsistent instruction while you patch. | |
36 | */ | |
37 | extern void *text_poke(void *addr, const void *opcode, size_t len); | |
38 | extern int poke_int3_handler(struct pt_regs *regs); | |
39 | extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); | |
40 | ||
e30ab1af | 41 | #ifndef CONFIG_UML_X86 |
7be82ee8 PZ |
42 | static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) |
43 | { | |
44 | regs->ip = ip; | |
45 | } | |
46 | ||
47 | #define INT3_INSN_SIZE 1 | |
48 | #define CALL_INSN_SIZE 5 | |
49 | ||
50 | #ifdef CONFIG_X86_64 | |
/* Emulate a PUSH onto the trapped context's stack: grow it by one word
 * and store @val there. */
static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}
62 | ||
63 | static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) | |
64 | { | |
65 | int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); | |
66 | int3_emulate_jmp(regs, func); | |
67 | } | |
e30ab1af SRV |
68 | #endif /* CONFIG_X86_64 */ |
69 | #endif /* !CONFIG_UML_X86 */ | |
7be82ee8 | 70 | |
35de5b06 | 71 | #endif /* _ASM_X86_TEXT_PATCHING_H */ |