#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
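
/*
 * The debug helpers below are only active when the kernel is booted with
 * "debug-alternative"; they log what gets patched and dump the bytes involved.
 */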
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
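
/*
 * A replacement may contain a near JMP whose displacement was computed
 * relative to where the replacement sits in .altinstr_replacement. Once the
 * bytes are copied over the original location that displacement is stale, so
 * recompute it relative to the original instruction, and shrink the JMP to
 * its two-byte form when the new displacement fits in eight bits.
 */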
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
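
/*
 * optimize_nops() rewrites the single-byte 0x90 padding NOPs that the
 * alternatives macros emitted with the longer, ideal NOPs for this CPU.
 */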
341 * "noinline" to cause control flow change and thus invalidate I$ and
342 * cause refetch after modification.
344 static void __init_or_module noinline
optimize_nops(struct alt_instr
*a
, u8
*instr
)
349 for (i
= 0; i
< a
->padlen
; i
++) {
350 if (instr
[i
] != 0x90)
354 local_irq_save(flags
);
355 add_nops(instr
+ (a
->instrlen
- a
->padlen
), a
->padlen
);
356 local_irq_restore(flags
);
358 DUMP_BYTES(instr
, a
->instrlen
, "%p: [%d:%d) optimized NOPs: ",
359 instr
, a
->instrlen
- a
->padlen
, a
->padlen
);
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];
379 DPRINTK("alt table %p -> %p", start
, end
);
381 * The scan order should be from start to end. A later scanned
382 * alternative code can overwrite previously scanned alternative code.
383 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
386 * So be careful if you want to change the scan order to any other
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
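
/*
 * The kernel is built with LOCK prefixes on the instructions that need them
 * on SMP. When booting on a single CPU those prefixes are rewritten to
 * harmless DS segment overrides, and swapped back if another CPU is ever
 * brought online. The helpers below perform that one-byte swap for every
 * offset recorded in the .smp_locks section.
 */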
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}
/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the to be patched code.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than a unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during to code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
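
/*
 * Run on every CPU via on_each_cpu() to serialize the instruction stream
 * after each step of a cross-modifying write.
 */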
static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;
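
/*
 * Called from the int3 exception path: while text_poke_bp() has a breakpoint
 * planted at the patch site, any CPU that hits it is diverted to the
 * caller-supplied handler instead of executing a half-written instruction.
 */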
int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for
	 * making sure the in_progress flags is correctly ordered wrt.
	 * patching
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}