From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Maxim Levitsky <mlevitsk@redhat.com>
Date: Wed, 3 Aug 2022 18:50:11 +0300
Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM

When #SMI is asserted, the CPU can be in an interrupt shadow
due to sti or mov ss.

Neither the Intel nor the AMD architecture manuals require the
#SMI to be blocked during the shadow, and on top of that, since
neither SVM nor VMX has true support for an SMI window, waiting
for one instruction would mean single-stepping the guest.

Instead, allow #SMI in this case, but reset the interrupt shadow
and stash its value in SMRAM so it can be restored on exit
from SMM.
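
In short, the flow added by this patch looks like the sketch below
(a simplified summary built from the new set_int_shadow emulator op
and the int_shadow SMRAM field introduced here, not a literal
excerpt of the code):

	/* on SMM entry (enter_smm_save_state_*() / enter_smm()): save, then clear */
	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);

	/* on RSM (rsm_load_state_32()/_64()): restore the saved value */
	ctxt->ops->set_int_shadow(ctxt, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;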

This fixes rare failures, seen mostly with Windows guests on VMX,
in which #SMI falls on the sti instruction; this manifests as a
VM entry failure because EFLAGS.IF is not set while the STI
interrupt window is still set in the VMCS.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/emulate.c     | 17 ++++++++++++++---
 arch/x86/kvm/kvm_emulate.h | 10 ++++++----
 arch/x86/kvm/x86.c         | 12 ++++++++++++
 3 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 03f9e5aa036e..bb008a5be539 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2435,7 +2435,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 			     const struct kvm_smram_state_32 *smstate)
 {
 	struct desc_ptr dt;
-	int i;
+	int i, r;
 
 	ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
 	ctxt->_eip = smstate->eip;
@@ -2470,8 +2470,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 
 	ctxt->ops->set_smbase(ctxt, smstate->smbase);
 
-	return rsm_enter_protected_mode(ctxt, smstate->cr0,
-					smstate->cr3, smstate->cr4);
+	r = rsm_enter_protected_mode(ctxt, smstate->cr0,
+				     smstate->cr3, smstate->cr4);
+
+	if (r != X86EMUL_CONTINUE)
+		return r;
+
+	ctxt->ops->set_int_shadow(ctxt, 0);
+	ctxt->interruptibility = (u8)smstate->int_shadow;
+
+	return X86EMUL_CONTINUE;
 }
 
 #ifdef CONFIG_X86_64
@@ -2520,6 +2528,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 	rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
 	rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
 
+	ctxt->ops->set_int_shadow(ctxt, 0);
+	ctxt->interruptibility = (u8)smstate->int_shadow;
+
 	return X86EMUL_CONTINUE;
 }
 #endif
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 76c0b8e7890b..a7313add0f2a 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -234,6 +234,7 @@ struct x86_emulate_ops {
 	bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
 
 	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+	void (*set_int_shadow)(struct x86_emulate_ctxt *ctxt, u8 shadow);
 
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
@@ -518,7 +519,8 @@ struct kvm_smram_state_32 {
 	u32 reserved1[62];
 	u32 smbase;
 	u32 smm_revision;
-	u32 reserved2[5];
+	u32 reserved2[4];
+	u32 int_shadow; /* KVM extension */
 	u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
 	u32 reserved3[5];
 
@@ -566,6 +568,7 @@ static inline void __check_smram32_offsets(void)
 	__CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
 	__CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
 	__CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
+	__CHECK_SMRAM32_OFFSET(int_shadow, 0xFF10);
 	__CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
 	__CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
 	__CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
@@ -625,7 +628,7 @@ struct kvm_smram_state_64 {
 	u64 io_restart_rsi;
 	u64 io_restart_rdi;
 	u32 io_restart_dword;
-	u32 reserved1;
+	u32 int_shadow;
 	u8 io_inst_restart;
 	u8 auto_hlt_restart;
 	u8 reserved2[6];
@@ -663,7 +666,6 @@ struct kvm_smram_state_64 {
 	u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */
 };
 
-
 static inline void __check_smram64_offsets(void)
 {
 #define __CHECK_SMRAM64_OFFSET(field, offset) \
@@ -684,7 +686,7 @@ static inline void __check_smram64_offsets(void)
 	__CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
 	__CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
 	__CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
-	__CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
+	__CHECK_SMRAM64_OFFSET(int_shadow, 0xFEC4);
 	__CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
 	__CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
 	__CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7a4d86f9bdcd..609829ec1d13 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8173,6 +8173,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
 	static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
 }
 
+static void emulator_set_int_shadow(struct x86_emulate_ctxt *ctxt, u8 shadow)
+{
+	static_call(kvm_x86_set_interrupt_shadow)(emul_to_vcpu(ctxt), shadow);
+}
+
 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 {
 	return emul_to_vcpu(ctxt)->arch.hflags;
@@ -8253,6 +8258,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.guest_has_fxsr = emulator_guest_has_fxsr,
 	.guest_has_rdpid = emulator_guest_has_rdpid,
 	.set_nmi_mask = emulator_set_nmi_mask,
+	.set_int_shadow = emulator_set_int_shadow,
 	.get_hflags = emulator_get_hflags,
 	.exiting_smm = emulator_exiting_smm,
 	.leave_smm = emulator_leave_smm,
@@ -10170,6 +10176,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
 	smram->cr4 = kvm_read_cr4(vcpu);
 	smram->smm_revision = 0x00020000;
 	smram->smbase = vcpu->arch.smbase;
+
+	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
 }
 
 #ifdef CONFIG_X86_64
@@ -10218,6 +10226,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_stat
 	enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
 	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
 	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
+
+	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
 }
 #endif
 
@@ -10254,6 +10264,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	kvm_rip_write(vcpu, 0x8000);
 
+	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+
 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
 	static_call(kvm_x86_set_cr0)(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;