/* arch/x86/kernel/paravirt_patch_64.c */
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

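/*
 * DEF_NATIVE(ops, name, code) assembles "code" between the symbols
 * start_##ops##_##name and end_##ops##_##name, providing the native
 * instruction sequence that can be patched directly over the
 * corresponding paravirt call site.
 */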
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

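/*
 * Register-move sequences used by paravirt_patch_ident_{32,64}() below
 * for ops whose native implementation is the identity function.
 */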
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

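/*
 * Native fast paths for the paravirt spinlock ops: unlocking a qspinlock
 * is a single byte store, and a CPU running on bare metal is never
 * preempted by a hypervisor, so vcpu_is_preempted() just returns false.
 */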
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif

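/*
 * Patch an identity op: on 64-bit the 32-bit variant moves %edi to %eax
 * and the 64-bit variant moves %rdi to %rax.
 */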
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);

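/*
 * Patch the paravirt call site at "addr" with the native instruction
 * sequence for "type" when one is defined above; otherwise fall back to
 * paravirt_patch_default(), which emits a direct call or jump, an
 * identity patch, or a nop as appropriate.
 */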
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
{
        const unsigned char *start, *end;
        unsigned ret;

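/* Emit a case that selects the DEF_NATIVE()-generated sequence for ops.x. */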
#define PATCH_SITE(ops, x)                                      \
                case PARAVIRT_PATCH(ops.x):                     \
                        start = start_##ops##_##x;              \
                        end = end_##ops##_##x;                  \
                        goto patch_site
        switch (type) {
        PATCH_SITE(pv_irq_ops, restore_fl);
        PATCH_SITE(pv_irq_ops, save_fl);
        PATCH_SITE(pv_irq_ops, irq_enable);
        PATCH_SITE(pv_irq_ops, irq_disable);
        PATCH_SITE(pv_cpu_ops, usergs_sysret64);
        PATCH_SITE(pv_cpu_ops, swapgs);
        PATCH_SITE(pv_mmu_ops, read_cr2);
        PATCH_SITE(pv_mmu_ops, read_cr3);
        PATCH_SITE(pv_mmu_ops, write_cr3);
        PATCH_SITE(pv_cpu_ops, wbinvd);
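        /*
         * The spinlock ops may have been switched to a hypervisor-specific
         * implementation at boot, so only patch in the native sequence when
         * the op still points at the native function.
         */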
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
        case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
                if (pv_is_native_spin_unlock()) {
                        start = start_pv_lock_ops_queued_spin_unlock;
                        end   = end_pv_lock_ops_queued_spin_unlock;
                        goto patch_site;
                }
                goto patch_default;

        case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
                if (pv_is_native_vcpu_is_preempted()) {
                        start = start_pv_lock_ops_vcpu_is_preempted;
                        end   = end_pv_lock_ops_vcpu_is_preempted;
                        goto patch_site;
                }
                goto patch_default;
#endif

        default:
patch_default: __maybe_unused
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;

patch_site:
                ret = paravirt_patch_insns(ibuf, len, start, end);
                break;
        }
#undef PATCH_SITE
        return ret;
}