#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

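/*
 * Example (address and bytes are illustrative): booting with
 * "debug-alternative" on the command line makes DUMP_BYTES() emit
 * lines resembling
 *
 *	ffffffff81001234: final_insn: e9 05 00 00 00 90 90 90
 *
 * i.e. the fmt/args prefix followed by the buffer as space-separated
 * hex bytes.
 */
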
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */

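/*
 * For example, with the layout above, intel_nops[3] below is
 * intelnops + 1 + 2: skipping the 1-byte and 2-byte nops lands on the
 * start of the 3-byte one. Index n therefore always yields an n-byte
 * nop, which is what add_nops() relies on.
 */
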
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

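/*
 * Illustrative sketch (not a call site from this file): padding an
 * 11-byte site entirely with nops gets split into at most
 * ASM_NOP_MAX-byte (i.e. 8-byte) chunks, here one 8-byte nop followed
 * by a 3-byte one:
 *
 *	u8 buf[11];
 *
 *	add_nops(buf, 11);
 *	text_poke_early(site, buf, 11);
 */
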
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

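/*
 * 0xeb is JMP rel8 (2 bytes total), 0xe9 is JMP rel32 (5 bytes total);
 * e.g. "eb 05" jumps 5 bytes past the end of the JMP itself.
 */
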
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

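/*
 * Worked example (illustrative numbers): if the replacement is
 * "e9 20 00 00 00" (JMP +0x20), then tgt_rip = repl_insn + 5 + 0x20.
 * If, relative to the original site, tgt_rip - orig_insn == 0x30, the
 * displacement fits in an s8, so the buffer is rewritten as
 * "eb 2e" (0x30 - 2) followed by three bytes of nops.
 */
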
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	if (instr[0] != 0x90)
		return;

	add_nops(instr + (a->instrlen - a->padlen), a->padlen);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

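/*
 * Example (bytes are illustrative): a site whose padding is four
 * single-byte nops ("90 90 90 90") gets rewritten with ideal_nops[4],
 * which on P6-class CPUs is the single 4-byte nop "0f 1f 40 00".
 */
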
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */

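/*
 * Illustrative sketch of a patch site (the exact ALTERNATIVE() macro
 * shape varies across kernel versions):
 *
 *	asm volatile(ALTERNATIVE("", "clac", X86_FEATURE_SMAP));
 *
 * This emits nop padding at the site plus an alt_instr entry in the
 * __alt_instructions table; the loop below copies "clac" over the
 * padding only when the running CPU has the SMAP feature bit set.
 */
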
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

351 DPRINTK("alt table %p -> %p", start
, end
);
353 * The scan order should be from start to end. A later scanned
354 * alternative code can overwrite previously scanned alternative code.
355 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
358 * So be careful if you want to change the scan order to any other
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

375 DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
379 replacement
, a
->replacementlen
, a
->padlen
);
		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

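/*
 * Byte-level sketch of what the two helpers above toggle: LOCK_PREFIX
 * records the address of each prefix byte in the .smp_locks section,
 * so e.g. a locked cmpxchg site flips between
 *
 *	f0 0f b1 ...	lock; cmpxchg	(SMP)
 *	3e 0f b1 ...	ds; cmpxchg	(UP)
 *
 * Both prefixes are one byte, so only that single byte is ever poked.
 */
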
struct smp_alt_module {
	/* which module owns these locks (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};

static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for SMP alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the to-be-patched code.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
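/*
 * Illustrative sketch of a caller (jump-label-style patching; not a
 * call site in this file): turning a 5-byte nop into a near JMP.
 *
 *	unsigned char jmp[5] = { 0xe9, 0, 0, 0, 0 };
 *
 *	*(s32 *)&jmp[1] = target - (entry + 5);
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(entry, jmp, 5, entry + 5);
 *	mutex_unlock(&text_mutex);
 *
 * Any CPU that hits the transient int3 is redirected by
 * poke_int3_handler() above to the handler address, here the
 * instruction following the patched site.
 */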
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for
	 * making sure the in_progress flag is correctly ordered wrt.
	 * patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}