// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

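/*
 * Illustrative sketch, not part of the original file: how the ideal_nops
 * table is typically consumed.  Index n (1 <= n <= ASM_NOP_MAX) yields an
 * n-byte NOP sequence for the boot CPU, and the extra NOP_ATOMIC5 slot
 * (hence the ASM_NOP_MAX+2 sizing above) holds the 5-byte NOP that must be
 * patchable atomically, as used by text_poke_loc_init() below:
 *
 *	u8 buf[5];
 *
 *	memcpy(buf, ideal_nops[5], 5);			// any 5-byte NOP
 *	memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);	// atomic 5-byte NOP
 */
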
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		if (a->instrlen > a->replacementlen) {
			add_nops(insn_buff + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insn_buff_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}
}

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code that is about to be
	 * patched. Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other CPUs are not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Module text is initially marked non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();
	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While the boot memory allocator is running we cannot use struct
	 * pages as they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	local_irq_save(flags);

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but it avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not in use, as is the case at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	pte_unmap_unlock(ptep, ptl);
	local_irq_restore(flags);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
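
/*
 * Illustrative only, not part of the original file: the usual caller pattern
 * for text_poke() outside of early boot.  The names new_insn/new_insn_len are
 * hypothetical; the point is that text_mutex must be held and that other CPUs
 * are resynchronized afterwards via text_poke_sync() (defined below), subject
 * to the atomicity constraints described in the kerneldoc above:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, new_insn, new_insn_len);
 *	text_poke_sync();
 *	mutex_unlock(&text_mutex);
 */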

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}

int noinstr poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int len, ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	len = text_opcode_size(tp->opcode);
	ip += len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->rel32);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->rel32);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc(desc);
	return ret;
}

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:		vector of instructions to patch
 * @nr_entries:	number of entries in the vector
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) with the first byte of the
 *		  replacement opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	struct bp_patching_desc desc = {
		.vec = tp,
		.nr_entries = nr_entries,
		.refs = ATOMIC_INIT(1),
	};
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
		int len = text_opcode_size(tp[i].opcode);

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);
			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  (const char *)tp[i].text + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);
			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) with the first byte of the
	 * replacement opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove and synchronize_rcu(), except we have a very primitive
	 * refcount based completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}

static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;

	memcpy((void *)tp->text, opcode, len);
	if (!emulate)
		emulate = opcode;

	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
	insn_get_length(&insn);

	BUG_ON(!insn_complete(&insn));
	BUG_ON(len != insn.length);

	tp->rel_addr = addr - (void *)_stext;
	tp->opcode = insn.opcode.bytes[0];

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, ideal_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}

/*
 * We rely hard on tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
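
/*
 * Illustrative only, not part of the original file: a hypothetical caller
 * batching several pokes so that the IPI-heavy sync steps in
 * text_poke_bp_batch() are shared between them (addr[], insn[], len[] and
 * nr are made-up names for the example):
 *
 *	mutex_lock(&text_mutex);
 *	for (i = 0; i < nr; i++)
 *		text_poke_queue(addr[i], insn[i], len[i], NULL);
 *	text_poke_finish();
 *	mutex_unlock(&text_mutex);
 *
 * text_poke_queue() flushes on its own when tp_vec fills up or when the
 * addresses arrive out of order, so the final text_poke_finish() is what
 * guarantees that everything queued has actually been patched.
 */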

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to emulate while the breakpoint is in place;
 *		NULL means emulate @opcode itself
 *
 * Update a single instruction using a vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}
1378}