]> git.proxmox.com Git - pve-kernel.git/blame - patches/kernel/0029-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch
update sources to Ubuntu-5.19.0-14.14
[pve-kernel.git] / patches / kernel / 0029-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch
CommitLineData
54ebe3cb
TL
1From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2From: Maxim Levitsky <mlevitsk@redhat.com>
3Date: Tue, 21 Jun 2022 18:09:02 +0300
4Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM
5
6When #SMI is asserted, the CPU can be in interrupt shadow
7due to sti or mov ss.
8
9It is not mandatory in Intel/AMD prm to have the #SMI
10blocked during the shadow, and on top of
11that, since neither SVM nor VMX has true support for SMI
12window, waiting for one instruction would mean single stepping
13the guest.
14
15Instead, allow #SMI in this case, but both reset the interrupt
16window and stash its value in SMRAM to restore it on exit
17from SMM.
18
19This fixes rare failures seen mostly on windows guests on VMX,
20when #SMI falls on the sti instruction which manifests in
21VM entry failure due to EFLAGS.IF not being set, but STI interrupt
22window still being set in the VMCS.
23
24Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
25Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
26---
27 arch/x86/kvm/emulate.c | 17 ++++++++++++++---
28 arch/x86/kvm/kvm_emulate.h | 13 ++++++++++---
29 arch/x86/kvm/x86.c | 12 ++++++++++++
30 3 files changed, 36 insertions(+), 6 deletions(-)
31
32diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
33index d34ed0475128..f4373213bef8 100644
34--- a/arch/x86/kvm/emulate.c
35+++ b/arch/x86/kvm/emulate.c
36@@ -2431,7 +2431,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
37 struct kvm_smram_state_32 *smstate)
38 {
39 struct desc_ptr dt;
40- int i;
41+ int i, r;
42
43 ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
44 ctxt->_eip = smstate->eip;
45@@ -2466,8 +2466,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
46
47 ctxt->ops->set_smbase(ctxt, smstate->smbase);
48
49- return rsm_enter_protected_mode(ctxt, smstate->cr0,
50- smstate->cr3, smstate->cr4);
51+ r = rsm_enter_protected_mode(ctxt, smstate->cr0,
52+ smstate->cr3, smstate->cr4);
53+
54+ if (r != X86EMUL_CONTINUE)
55+ return r;
56+
57+ ctxt->ops->set_int_shadow(ctxt, 0);
58+ ctxt->interruptibility = (u8)smstate->int_shadow;
59+
60+ return X86EMUL_CONTINUE;
61 }
62
63 #ifdef CONFIG_X86_64
64@@ -2516,6 +2524,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
65 rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
66 rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
67
68+ ctxt->ops->set_int_shadow(ctxt, 0);
69+ ctxt->interruptibility = (u8)smstate->int_shadow;
70+
71 return X86EMUL_CONTINUE;
72 }
73 #endif
74diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
75index 3bbf7c1c5b18..e7acf0052389 100644
76--- a/arch/x86/kvm/kvm_emulate.h
77+++ b/arch/x86/kvm/kvm_emulate.h
78@@ -231,6 +231,7 @@ struct x86_emulate_ops {
79 bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
80
81 void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
82+ void (*set_int_shadow)(struct x86_emulate_ctxt *ctxt, u8 shadow);
83
84 unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
85 void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
86@@ -498,7 +499,9 @@ struct kvm_smram_state_32 {
87 u32 reserved1[62]; /* FE00 - FEF7 */
88 u32 smbase; /* FEF8 */
89 u32 smm_revision; /* FEFC */
90- u32 reserved2[5]; /* FF00-FF13 */
91+ u32 reserved2[4]; /* FF00-FF0F*/
92+ /* int_shadow is KVM extension*/
93+ u32 int_shadow; /* FF10 */
94 /* CR4 is not present in Intel/AMD SMRAM image*/
95 u32 cr4; /* FF14 */
96 u32 reserved3[5]; /* FF18 */
97@@ -570,13 +573,17 @@ struct kvm_smram_state_64 {
98 struct kvm_smm_seg_state_64 idtr; /* FE80 (R/O) */
99 struct kvm_smm_seg_state_64 tr; /* FE90 (R/O) */
100
101- /* I/O restart and auto halt restart are not implemented by KVM */
102+ /*
103+ * I/O restart and auto halt restart are not implemented by KVM
104+ * int_shadow is KVM's extension
105+ */
106+
107 u64 io_restart_rip; /* FEA0 (R/O) */
108 u64 io_restart_rcx; /* FEA8 (R/O) */
109 u64 io_restart_rsi; /* FEB0 (R/O) */
110 u64 io_restart_rdi; /* FEB8 (R/O) */
111 u32 io_restart_dword; /* FEC0 (R/O) */
112- u32 reserved1; /* FEC4 */
113+ u32 int_shadow; /* FEC4 (R/O) */
114 u8 io_instruction_restart; /* FEC8 (R/W) */
115 u8 auto_halt_restart; /* FEC9 (R/W) */
116 u8 reserved2[6]; /* FECA-FECF */
117diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
118index f7b2fe174574..2072d994b06f 100644
119--- a/arch/x86/kvm/x86.c
120+++ b/arch/x86/kvm/x86.c
121@@ -7840,6 +7840,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
122 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
123 }
124
125+static void emulator_set_int_shadow(struct x86_emulate_ctxt *ctxt, u8 shadow)
126+{
127+ static_call(kvm_x86_set_interrupt_shadow)(emul_to_vcpu(ctxt), shadow);
128+}
129+
130 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
131 {
132 return emul_to_vcpu(ctxt)->arch.hflags;
133@@ -7911,6 +7916,7 @@ static const struct x86_emulate_ops emulate_ops = {
134 .guest_has_fxsr = emulator_guest_has_fxsr,
135 .guest_has_rdpid = emulator_guest_has_rdpid,
136 .set_nmi_mask = emulator_set_nmi_mask,
137+ .set_int_shadow = emulator_set_int_shadow,
138 .get_hflags = emulator_get_hflags,
139 .exiting_smm = emulator_exiting_smm,
140 .leave_smm = emulator_leave_smm,
141@@ -9688,6 +9694,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
142 smram->cr4 = kvm_read_cr4(vcpu);
143 smram->smm_revision = 0x00020000;
144 smram->smbase = vcpu->arch.smbase;
145+
146+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
147 }
148
149 #ifdef CONFIG_X86_64
150@@ -9736,6 +9744,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_stat
151 enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
152 enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
153 enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
154+
155+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
156 }
157 #endif
158
159@@ -9772,6 +9782,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
160 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
161 kvm_rip_write(vcpu, 0x8000);
162
163+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
164+
165 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
166 static_call(kvm_x86_set_cr0)(vcpu, cr0);
167 vcpu->arch.cr0 = cr0;