// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>

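/*
 * Set once boot-time patching has completed, so that other code can tell
 * whether the alternatives have been applied yet.
 */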
int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

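/* Boot with "debug-alternative" to enable the DPRINTK()/DUMP_BYTES() output below. */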
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

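/* Boot with "noreplace-smp" to keep the SMP lock prefixes even when booting UP. */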
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		fallthrough;

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
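/* E.g. a len of 11 becomes one 8-byte NOP (ASM_NOP_MAX) plus one 3-byte NOP. */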
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

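/*
 * A near JMP's displacement is relative to the end of the instruction, so a
 * replacement JMP copied from the alternatives section to the original site
 * must have its displacement recomputed. If the new displacement fits in
 * 8 bits, the JMP is shrunk to its 2-byte form and the rest padded with NOPs.
 */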
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;
		/* Mask away "NOT" flag bit for feature to test. */
		u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
		 *   patch if feature is *NOT* present.
		 */
		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
			feature >> 5,
			feature & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		if (a->instrlen > a->replacementlen) {
			add_nops(insn_buff + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insn_buff_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}
}
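
/*
 * For reference, an alternatives site is emitted by the ALTERNATIVE() macros
 * in <asm/alternative.h>, along the lines of (illustrative only;
 * X86_FEATURE_FOO is a placeholder feature bit):
 *
 *	asm volatile (ALTERNATIVE("old insns", "new insns", X86_FEATURE_FOO));
 *
 * This places "old insns" in .text and records an alt_instr entry;
 * apply_alternatives() above copies "new insns" over the site when the boot
 * CPU has the feature.
 */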

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to be patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate.is_lazy))
		leave_mm(smp_processor_id());

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

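/* The dedicated patching mm and address, set up once by poking_init(). */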
__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but none are needed here since the mm is no longer in use at
	 * this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

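/*
 * Run sync_core() on all CPUs (via IPI), forcing each of them to serialize
 * and re-fetch instruction bytes that were just modified.
 */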
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}

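/*
 * Called on every #BP from the exception entry code (hence noinstr); returns
 * 1 when the trapping INT3 belongs to an in-progress text_poke_bp_batch()
 * and has been emulated, 0 when the INT3 is somebody else's (e.g. a kprobe).
 */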
noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int len, ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	len = text_opcode_size(tp->opcode);
	ip += len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->rel32);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->rel32);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc(desc);
	return ret;
}

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of
 *		  replacing opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	struct bp_patching_desc desc = {
		.vec = tp,
		.nr_entries = nr_entries,
		.refs = ATOMIC_INIT(1),
	};
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
		int len = text_opcode_size(tp[i].opcode);

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);
			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  (const char *)tp[i].text + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);
			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) by the first byte of
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove the descriptor and wait for all references to drop; like
	 * synchronize_rcu(), except we have a very primitive refcount based
	 * completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}

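/*
 * Decode the instruction to be emulated (defaulting to the new opcode
 * itself), validate its length, and record the opcode and displacement so
 * poke_int3_handler() can emulate it while the site still starts with INT3.
 */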
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;

	memcpy((void *)tp->text, opcode, len);
	if (!emulate)
		emulate = opcode;

	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
	insn_get_length(&insn);

	BUG_ON(!insn_complete(&insn));
	BUG_ON(len != insn.length);

	tp->rel_addr = addr - (void *)_stext;
	tp->opcode = insn.opcode.bytes[0];

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
	case RET_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, ideal_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}

/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
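
/*
 * Batching: callers queue a series of address-ordered pokes with
 * text_poke_queue() and flush them with text_poke_finish(), amortizing the
 * sync-core IPIs of text_poke_bp_batch() over the whole batch.
 */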

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Update a single instruction with the vector in the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}