// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);
#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
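
/*
 * A minimal sketch (hypothetical helper, not part of this file) of the
 * displacement arithmetic above: the jump target is an absolute address,
 * so when a JMP is copied from repl_insn to orig_insn only the encoded
 * relative displacement needs to change.
 */
static inline s32 __maybe_unused example_recompute_displ(u8 *orig_insn,
							 u8 *repl_insn,
							 s32 o_dspl)
{
	u8 *next_rip = repl_insn + 5;		/* RIP after a 5-byte JMP */
	u8 *tgt_rip  = next_rip + o_dspl;	/* absolute jump target */

	return (s32)(tgt_rip - orig_insn);	/* displacement at new site */
}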
338 * "noinline" to cause control flow change and thus invalidate I$ and
339 * cause refetch after modification.
341 static void __init_or_module noinline
optimize_nops(struct alt_instr
*a
, u8
*instr
)
346 for (i
= 0; i
< a
->padlen
; i
++) {
347 if (instr
[i
] != 0x90)
351 local_irq_save(flags
);
352 add_nops(instr
+ (a
->instrlen
- a
->padlen
), a
->padlen
);
353 local_irq_restore(flags
);
355 DUMP_BYTES(instr
, a
->instrlen
, "%px: [%d:%d) optimized NOPs: ",
356 instr
, a
->instrlen
- a
->padlen
, a
->padlen
);
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
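
/*
 * A minimal sketch (hypothetical helper, not part of this file) of the
 * CALL fixup above: the rel32 operand is RIP-relative, so copying the
 * instruction from 'replacement' to 'instr' shifts the encoded offset by
 * exactly (replacement - instr).
 */
static inline void __maybe_unused example_fix_call_offset(u8 *insnbuf,
							  u8 *instr,
							  u8 *replacement)
{
	if (insnbuf[0] == 0xe8)		/* CALL rel32 */
		*(s32 *)(insnbuf + 1) += replacement - instr;
}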
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
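
/*
 * Illustrative sketch (hypothetical helper, not part of this file): on a
 * uniprocessor kernel the LOCK prefix (0xf0) is pointless, so it is swapped
 * for a DS segment override (0x3e), which decodes to the same instruction
 * without the bus lock. The two functions above just swap that single
 * prefix byte in each direction.
 */
static inline bool __maybe_unused example_is_lock_site(const s32 *poff,
						       u8 *text, u8 *text_end)
{
	u8 *ptr = (u8 *)poff + *poff;	/* resolve the relative offset */

	return *poff && ptr >= text && ptr < text_end &&
	       (*ptr == 0xf0 || *ptr == 0x3e);
}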
struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}
/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_ops.init.patch(p->instrtype, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */
static void __init int3_magic(unsigned int *ptr)
{
	*ptr = 1;
}

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}
static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : : __ASM_SEL_RAW(a, D) (&val) : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}
void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to be patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}
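
/*
 * A minimal usage sketch (hypothetical boot-time call site, not part of
 * this file): pad a 5-byte patch site with ideal NOPs before SMP is up,
 * reusing add_nops() to prepare the buffer as apply_alternatives() does.
 */
static void __init __maybe_unused example_poke_nops(void *site)
{
	u8 buf[5];

	add_nops(buf, sizeof(buf));		/* fill with ideal NOPs */
	text_poke_early(site, buf, sizeof(buf));
}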
__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;
static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	local_irq_save(flags);

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this allows to avoid open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not used, as it is in this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	pte_unmap_unlock(ptep, ptl);
	local_irq_restore(flags);
	return addr;
}
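
/*
 * A minimal sketch (hypothetical helper with hypothetical numbers, not part
 * of this file) of the cross-page test above: with PAGE_SIZE 4096, patching
 * 5 bytes at page offset 4094 spans two pages, so both must be mapped at
 * poking_addr and poking_addr + PAGE_SIZE.
 */
static inline bool __maybe_unused example_crosses_page(void *addr, size_t len)
{
	return offset_in_page(addr) + len > PAGE_SIZE;
}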
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *	    despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}
static void do_sync_core(void *info)
{
	sync_core();
}

static struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
} bp_patching;

static int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < tp->addr)
		return -1;
	if (key > tp->addr)
		return 1;
	return 0;
}
NOKPROBE_SYMBOL(patch_cmp);
int poke_int3_handler(struct pt_regs *regs)
{
	struct text_poke_loc *tp;
	unsigned char int3 = 0xcc;
	void *ip;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching.nr_entries.
	 *
	 *	nr_entries != 0			INT3
	 *	WMB				RMB
	 *	write INT3			if (nr_entries)
	 *
	 * Idem for other elements in bp_patching.
	 */
	smp_rmb();

	if (likely(!bp_patching.nr_entries))
		return 0;

	if (user_mode(regs))
		return 0;

	/*
	 * Discount the sizeof(int3). See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - sizeof(int3);

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(bp_patching.nr_entries > 1)) {
		tp = bsearch(ip, bp_patching.vec, bp_patching.nr_entries,
			     sizeof(struct text_poke_loc),
			     patch_cmp);
		if (!tp)
			return 0;
	} else {
		tp = bp_patching.vec;
		if (tp->addr != ip)
			return 0;
	}

	/* set up the specified breakpoint detour */
	regs->ip = (unsigned long) tp->detour;

	return 1;
}
NOKPROBE_SYMBOL(poke_int3_handler);
/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of
 *		  replacing opcode
 *	- sync cores
 */
void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	int patched_all_but_first = 0;
	unsigned char int3 = 0xcc;
	unsigned int i;

	lockdep_assert_held(&text_mutex);

	bp_patching.vec = tp;
	bp_patching.nr_entries = nr_entries;

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add a int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++)
		text_poke(tp[i].addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (i = 0; i < nr_entries; i++) {
		if (tp[i].len - sizeof(int3) > 0) {
			text_poke((char *)tp[i].addr + sizeof(int3),
				  (const char *)tp[i].opcode + sizeof(int3),
				  tp[i].len - sizeof(int3));
			patched_all_but_first++;
		}
	}

	if (patched_all_but_first) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/*
	 * Third step: replace the first byte (int3) by the first byte of
	 * replacing opcode.
	 */
	for (i = 0; i < nr_entries; i++)
		text_poke(tp[i].addr, tp[i].opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching.vec = NULL;
	bp_patching.nr_entries = 0;
}
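
/*
 * An illustrative worked example (hypothetical bytes, not from this file)
 * of the three-step sequence above, showing the state of a 5-byte patch
 * site after each step:
 *
 *	initial:	e9 46 01 00 00		(old JMP rel32)
 *	step 1:		cc 46 01 00 00		(INT3 guards the site)
 *	step 2:		cc 22 33 44 55		(tail bytes of new insn)
 *	step 3:		e8 22 33 44 55		(first byte last; new CALL)
 *
 * A CPU that executes the site between steps hits the INT3 and is diverted
 * to tp->detour by poke_int3_handler(), so no CPU ever decodes a torn mix
 * of old and new bytes.
 */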
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Update a single instruction with the vector in the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	struct text_poke_loc tp = {
		.detour = handler,
		.addr = addr,
		.len = len,
	};

	if (len > POKE_MAX_OPCODE_SIZE) {
		WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE);
		return;
	}

	memcpy((void *)tp.opcode, opcode, len);

	text_poke_bp_batch(&tp, 1);
}
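
/*
 * A minimal usage sketch (hypothetical call site, not part of this file):
 * replace a 5-byte instruction with a new CALL while other CPUs are running.
 * 'new_call' must be at most POKE_MAX_OPCODE_SIZE bytes, and 'fixup' is
 * where a CPU that hits the transient INT3 resumes execution.
 */
static void __maybe_unused example_patch_call(void *site, const void *new_call,
					      void *fixup)
{
	mutex_lock(&text_mutex);	/* text_poke_bp_batch() asserts this */
	text_poke_bp(site, new_call, 5, fixup);
	mutex_unlock(&text_mutex);
}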