#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
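/*
 * Worked example of the resulting indexing: intel_nops[3] points at the
 * start of GENERIC_NOP3 inside intelnops[] (offset 1 + 2), so
 * memcpy(buf, ideal_nops[3], 3) emits a single 3-byte nop. That is exactly
 * how add_nops() below consumes these tables.
 */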
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#elif defined(CONFIG_X86_32) && defined(CONFIG_X86_GENERIC)
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#else
		ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
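/*
 * Example: with ASM_NOP_MAX == 8, add_nops(buf, 7) emits a single 7-byte
 * ideal nop, while add_nops(buf, 12) emits an 8-byte nop followed by a
 * 4-byte one.  apply_alternatives() uses this to pad short replacements
 * up to the length of the original instruction(s).
 */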
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;

	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
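/*
 * Example of the effect: a 5-byte "jmp target" (0xe9 rel32) in the
 * replacement whose recomputed displacement fits in a signed byte is
 * shrunk to the 2-byte form (0xeb rel8) with the remaining 3 bytes filled
 * by nops; otherwise the 5-byte form is kept with the adjusted 32-bit
 * displacement.
 */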
341 * "noinline" to cause control flow change and thus invalidate I$ and
342 * cause refetch after modification.
344 static void __init_or_module noinline
optimize_nops(struct alt_instr
*a
, u8
*instr
)
348 if (instr
[0] != 0x90)
351 local_irq_save(flags
);
352 add_nops(instr
+ (a
->instrlen
- a
->padlen
), a
->padlen
);
353 local_irq_restore(flags
);
355 DUMP_BYTES(instr
, a
->instrlen
, "%p: [%d:%d) optimized NOPs: ",
356 instr
, a
->instrlen
- a
->padlen
, a
->padlen
);
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
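/*
 * Worked example of one table entry: if the feature is present, a 3-byte
 * replacement aimed at a 5-byte original site is copied into insnbuf,
 * padded with add_nops() to 5 bytes, and written back with
 * text_poke_early(); if the feature is absent, the original code is left
 * in place and only its padding NOPs are rewritten via optimize_nops().
 */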
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
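/*
 * Byte-level example of the trick above: LOCK_PREFIX emits 0xf0 ("lock")
 * in front of an instruction such as "addl" and records the prefix's
 * address in the .smp_locks table. On a uniprocessor boot that 0xf0 byte
 * is rewritten to 0x3e (a DS segment override, which is a no-op here),
 * and alternatives_enable_smp() flips it back when a second CPU comes up.
 */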
struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}
/* Return 1 if the address range is reserved for SMP-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the to be patched code.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or MCE
 * handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
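/*
 * Typical usage, as in alternatives_smp_lock() above: take text_mutex,
 * compute the address of the byte to rewrite, then do something like
 *
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);
 *
 * to change a single prefix byte while the rest of the kernel keeps
 * running.
 */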
static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;
int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for
	 * making sure the in_progress flag is correctly ordered wrt.
	 * patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;

	return addr;
}
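/*
 * Worked example of the sequence above, patching a 5-byte instruction:
 *
 *	bytes:		[0]   [1]  [2]  [3]  [4]
 *	step 1: poke	0xcc   --   --   --   --   (int3), sync cores
 *	step 2: poke	 --    n1   n2   n3   n4   (tail of new insn), sync cores
 *	step 3: poke	 n0    --   --   --   --   (first byte of new insn), sync cores
 *
 * Any CPU that hits the int3 in the meantime is redirected to @handler by
 * poke_int3_handler() above.
 */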