]> git.proxmox.com Git - pve-kernel.git/blame - patches/kernel/0015-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch
update submodule and patches to 6.1.14
[pve-kernel.git] / patches / kernel / 0015-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch
CommitLineData
54ebe3cb
TL
1From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2From: Maxim Levitsky <mlevitsk@redhat.com>
4fc427d9 3Date: Wed, 3 Aug 2022 18:50:11 +0300
54ebe3cb 4Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM
826eb0ff
FG
5MIME-Version: 1.0
6Content-Type: text/plain; charset=UTF-8
7Content-Transfer-Encoding: 8bit
54ebe3cb
TL
8
9When #SMI is asserted, the CPU can be in interrupt shadow
10due to sti or mov ss.
11
12It is not mandatory in the Intel/AMD PRM to have the #SMI
13blocked during the shadow, and on top of
14that, since neither SVM nor VMX has true support for SMI
15window, waiting for one instruction would mean single stepping
16the guest.
17
18Instead, allow #SMI in this case, but both reset the interrupt
19window and stash its value in SMRAM to restore it on exit
20from SMM.
21
22This fixes rare failures seen mostly on windows guests on VMX,
23when #SMI falls on the sti instruction which manifests in
24VM entry failure due to EFLAGS.IF not being set, but STI interrupt
25window still being set in the VMCS.
26
27Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
28Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
826eb0ff 29Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
54ebe3cb
TL
30---
31 arch/x86/kvm/emulate.c | 17 ++++++++++++++---
4fc427d9 32 arch/x86/kvm/kvm_emulate.h | 10 ++++++----
54ebe3cb 33 arch/x86/kvm/x86.c | 12 ++++++++++++
4fc427d9 34 3 files changed, 32 insertions(+), 7 deletions(-)
54ebe3cb
TL
35
36diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
12247ad0 37index 03f9e5aa036e..bb008a5be539 100644
54ebe3cb
TL
38--- a/arch/x86/kvm/emulate.c
39+++ b/arch/x86/kvm/emulate.c
4fc427d9
TL
40@@ -2435,7 +2435,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
41 const struct kvm_smram_state_32 *smstate)
54ebe3cb
TL
42 {
43 struct desc_ptr dt;
44- int i;
45+ int i, r;
46
47 ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
48 ctxt->_eip = smstate->eip;
4fc427d9 49@@ -2470,8 +2470,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
54ebe3cb
TL
50
51 ctxt->ops->set_smbase(ctxt, smstate->smbase);
52
53- return rsm_enter_protected_mode(ctxt, smstate->cr0,
54- smstate->cr3, smstate->cr4);
55+ r = rsm_enter_protected_mode(ctxt, smstate->cr0,
56+ smstate->cr3, smstate->cr4);
57+
58+ if (r != X86EMUL_CONTINUE)
59+ return r;
60+
61+ ctxt->ops->set_int_shadow(ctxt, 0);
62+ ctxt->interruptibility = (u8)smstate->int_shadow;
63+
64+ return X86EMUL_CONTINUE;
65 }
66
67 #ifdef CONFIG_X86_64
4fc427d9 68@@ -2520,6 +2528,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
54ebe3cb
TL
69 rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
70 rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
71
72+ ctxt->ops->set_int_shadow(ctxt, 0);
73+ ctxt->interruptibility = (u8)smstate->int_shadow;
74+
75 return X86EMUL_CONTINUE;
76 }
77 #endif
78diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
12247ad0 79index 76c0b8e7890b..a7313add0f2a 100644
54ebe3cb
TL
80--- a/arch/x86/kvm/kvm_emulate.h
81+++ b/arch/x86/kvm/kvm_emulate.h
12247ad0 82@@ -234,6 +234,7 @@ struct x86_emulate_ops {
54ebe3cb
TL
83 bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
84
85 void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
86+ void (*set_int_shadow)(struct x86_emulate_ctxt *ctxt, u8 shadow);
87
88 unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
89 void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
12247ad0 90@@ -518,7 +519,8 @@ struct kvm_smram_state_32 {
4fc427d9
TL
91 u32 reserved1[62];
92 u32 smbase;
93 u32 smm_revision;
94- u32 reserved2[5];
95+ u32 reserved2[4];
96+ u32 int_shadow; /* KVM extension */
97 u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
98 u32 reserved3[5];
54ebe3cb 99
12247ad0 100@@ -566,6 +568,7 @@ static inline void __check_smram32_offsets(void)
4fc427d9
TL
101 __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
102 __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
103 __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
104+ __CHECK_SMRAM32_OFFSET(int_shadow, 0xFF10);
105 __CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
106 __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
107 __CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
12247ad0 108@@ -625,7 +628,7 @@ struct kvm_smram_state_64 {
4fc427d9
TL
109 u64 io_restart_rsi;
110 u64 io_restart_rdi;
111 u32 io_restart_dword;
112- u32 reserved1;
113+ u32 int_shadow;
114 u8 io_inst_restart;
115 u8 auto_hlt_restart;
116 u8 reserved2[6];
12247ad0 117@@ -663,7 +666,6 @@ struct kvm_smram_state_64 {
4fc427d9
TL
118 u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */
119 };
120
121-
122 static inline void __check_smram64_offsets(void)
123 {
124 #define __CHECK_SMRAM64_OFFSET(field, offset) \
12247ad0 125@@ -684,7 +686,7 @@ static inline void __check_smram64_offsets(void)
4fc427d9
TL
126 __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
127 __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
128 __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
129- __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
130+ __CHECK_SMRAM64_OFFSET(int_shadow, 0xFEC4);
131 __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
132 __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
133 __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
54ebe3cb 134diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
fc2b61b1 135index f5d3c0ff083d..e4573ac6ee94 100644
54ebe3cb
TL
136--- a/arch/x86/kvm/x86.c
137+++ b/arch/x86/kvm/x86.c
fc2b61b1 138@@ -8192,6 +8192,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
54ebe3cb
TL
139 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
140 }
141
142+static void emulator_set_int_shadow(struct x86_emulate_ctxt *ctxt, u8 shadow)
143+{
144+ static_call(kvm_x86_set_interrupt_shadow)(emul_to_vcpu(ctxt), shadow);
145+}
146+
147 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
148 {
149 return emul_to_vcpu(ctxt)->arch.hflags;
fc2b61b1 150@@ -8272,6 +8277,7 @@ static const struct x86_emulate_ops emulate_ops = {
54ebe3cb
TL
151 .guest_has_fxsr = emulator_guest_has_fxsr,
152 .guest_has_rdpid = emulator_guest_has_rdpid,
153 .set_nmi_mask = emulator_set_nmi_mask,
154+ .set_int_shadow = emulator_set_int_shadow,
155 .get_hflags = emulator_get_hflags,
156 .exiting_smm = emulator_exiting_smm,
157 .leave_smm = emulator_leave_smm,
fc2b61b1 158@@ -10191,6 +10197,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
54ebe3cb
TL
159 smram->cr4 = kvm_read_cr4(vcpu);
160 smram->smm_revision = 0x00020000;
161 smram->smbase = vcpu->arch.smbase;
162+
163+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
164 }
165
166 #ifdef CONFIG_X86_64
fc2b61b1 167@@ -10239,6 +10247,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_stat
54ebe3cb
TL
168 enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
169 enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
170 enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
171+
172+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
173 }
174 #endif
175
fc2b61b1 176@@ -10275,6 +10285,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
54ebe3cb
TL
177 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
178 kvm_rip_write(vcpu, 0x8000);
179
180+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
181+
182 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
183 static_call(kvm_x86_set_cr0)(vcpu, cr0);
184 vcpu->arch.cr0 = cr0;