1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Maxim Levitsky <mlevitsk@redhat.com>
3 Date: Wed, 3 Aug 2022 18:50:11 +0300
4 Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 When #SMI is asserted, the CPU can be in interrupt shadow
12 It is not mandatory in the Intel/AMD PRM to have the #SMI
13 blocked during the shadow, and on top of
14 that, since neither SVM nor VMX has true support for SMI
15 window, waiting for one instruction would mean single stepping
18 Instead, allow #SMI in this case, but both reset the interrupt
19 window and stash its value in SMRAM to restore it on exit
22 This fixes rare failures seen mostly on windows guests on VMX,
23 when #SMI falls on the sti instruction which manifests in
24 VM entry failure due to EFLAGS.IF not being set, but STI interrupt
25 window still being set in the VMCS.
27 Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
28 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
29 Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
31 arch/x86/kvm/emulate.c | 17 ++++++++++++++---
32 arch/x86/kvm/kvm_emulate.h | 10 ++++++----
33 arch/x86/kvm/x86.c | 12 ++++++++++++
34 3 files changed, 32 insertions(+), 7 deletions(-)
36 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
37 index 03f9e5aa036e..bb008a5be539 100644
38 --- a/arch/x86/kvm/emulate.c
39 +++ b/arch/x86/kvm/emulate.c
40 @@ -2435,7 +2435,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
41 const struct kvm_smram_state_32 *smstate)
47 ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
48 ctxt->_eip = smstate->eip;
49 @@ -2470,8 +2470,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
51 ctxt->ops->set_smbase(ctxt, smstate->smbase);
53 - return rsm_enter_protected_mode(ctxt, smstate->cr0,
54 - smstate->cr3, smstate->cr4);
55 + r = rsm_enter_protected_mode(ctxt, smstate->cr0,
56 + smstate->cr3, smstate->cr4);
58 + if (r != X86EMUL_CONTINUE)
61 + ctxt->ops->set_int_shadow(ctxt, 0);
62 + ctxt->interruptibility = (u8)smstate->int_shadow;
64 + return X86EMUL_CONTINUE;
68 @@ -2520,6 +2528,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
69 rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
70 rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
72 + ctxt->ops->set_int_shadow(ctxt, 0);
73 + ctxt->interruptibility = (u8)smstate->int_shadow;
75 return X86EMUL_CONTINUE;
78 diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
79 index 76c0b8e7890b..a7313add0f2a 100644
80 --- a/arch/x86/kvm/kvm_emulate.h
81 +++ b/arch/x86/kvm/kvm_emulate.h
82 @@ -234,6 +234,7 @@ struct x86_emulate_ops {
83 bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
85 void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
86 + void (*set_int_shadow)(struct x86_emulate_ctxt *ctxt, u8 shadow);
88 unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
89 void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
90 @@ -518,7 +519,8 @@ struct kvm_smram_state_32 {
96 + u32 int_shadow; /* KVM extension */
97 u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
100 @@ -566,6 +568,7 @@ static inline void __check_smram32_offsets(void)
101 __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
102 __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
103 __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
104 + __CHECK_SMRAM32_OFFSET(int_shadow, 0xFF10);
105 __CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
106 __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
107 __CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
108 @@ -625,7 +628,7 @@ struct kvm_smram_state_64 {
111 u32 io_restart_dword;
117 @@ -663,7 +666,6 @@ struct kvm_smram_state_64 {
118 u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */
122 static inline void __check_smram64_offsets(void)
124 #define __CHECK_SMRAM64_OFFSET(field, offset) \
125 @@ -684,7 +686,7 @@ static inline void __check_smram64_offsets(void)
126 __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
127 __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
128 __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
129 - __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
130 + __CHECK_SMRAM64_OFFSET(int_shadow, 0xFEC4);
131 __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
132 __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
133 __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
134 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
135 index f5d3c0ff083d..e4573ac6ee94 100644
136 --- a/arch/x86/kvm/x86.c
137 +++ b/arch/x86/kvm/x86.c
138 @@ -8192,6 +8192,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
139 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
142 +static void emulator_set_int_shadow(struct x86_emulate_ctxt *ctxt, u8 shadow)
144 + static_call(kvm_x86_set_interrupt_shadow)(emul_to_vcpu(ctxt), shadow);
147 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
149 return emul_to_vcpu(ctxt)->arch.hflags;
150 @@ -8272,6 +8277,7 @@ static const struct x86_emulate_ops emulate_ops = {
151 .guest_has_fxsr = emulator_guest_has_fxsr,
152 .guest_has_rdpid = emulator_guest_has_rdpid,
153 .set_nmi_mask = emulator_set_nmi_mask,
154 + .set_int_shadow = emulator_set_int_shadow,
155 .get_hflags = emulator_get_hflags,
156 .exiting_smm = emulator_exiting_smm,
157 .leave_smm = emulator_leave_smm,
158 @@ -10191,6 +10197,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
159 smram->cr4 = kvm_read_cr4(vcpu);
160 smram->smm_revision = 0x00020000;
161 smram->smbase = vcpu->arch.smbase;
163 + smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
167 @@ -10239,6 +10247,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_stat
168 enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
169 enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
170 enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
172 + smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
176 @@ -10275,6 +10285,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
177 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
178 kvm_rip_write(vcpu, 0x8000);
180 + static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
182 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
183 static_call(kvm_x86_set_cr0)(vcpu, cr0);
184 vcpu->arch.cr0 = cr0;