]>
Commit | Line | Data |
---|---|---|
f6df304f TL |
1 | From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 |
2 | From: Maxim Levitsky <mlevitsk@redhat.com> | |
3 | Date: Tue, 21 Jun 2022 18:09:02 +0300 | |
4 | Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM | |
5 | ||
6 | When #SMI is asserted, the CPU can be in interrupt shadow | |
7 | due to sti or mov ss. | |
8 | ||
9 | It is not mandatory in Intel/AMD prm to have the #SMI | |
10 | blocked during the shadow, and on top of | |
11 | that, since neither SVM nor VMX has true support for SMI | |
12 | window, waiting for one instruction would mean single stepping | |
13 | the guest. | |
14 | ||
15 | Instead, allow #SMI in this case, but both reset the interrupt | |
16 | window and stash its value in SMRAM to restore it on exit | |
17 | from SMM. | |
18 | ||
19 | This fixes rare failures seen mostly on windows guests on VMX, | |
20 | when #SMI falls on the sti instruction which manifests in | |
21 | VM entry failure due to EFLAGS.IF not being set, but STI interrupt | |
22 | window still being set in the VMCS. | |
23 | ||
24 | Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> | |
25 | Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com> | |
26 | --- | |
27 | arch/x86/kvm/emulate.c | 17 ++++++++++++++--- | |
28 | arch/x86/kvm/kvm_emulate.h | 13 ++++++++++--- | |
29 | arch/x86/kvm/x86.c | 12 ++++++++++++ | |
30 | 3 files changed, 36 insertions(+), 6 deletions(-) | |
31 | ||
32 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c | |
33 | index 98c2cf169b39..5614456de922 100644 | |
34 | --- a/arch/x86/kvm/emulate.c | |
35 | +++ b/arch/x86/kvm/emulate.c | |
36 | @@ -2416,7 +2416,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, | |
37 | struct kvm_smram_state_32 *smstate) | |
38 | { | |
39 | struct desc_ptr dt; | |
40 | - int i; | |
41 | + int i, r; | |
42 | ||
43 | ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED; | |
44 | ctxt->_eip = smstate->eip; | |
45 | @@ -2451,8 +2451,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, | |
46 | ||
47 | ctxt->ops->set_smbase(ctxt, smstate->smbase); | |
48 | ||
49 | - return rsm_enter_protected_mode(ctxt, smstate->cr0, | |
50 | - smstate->cr3, smstate->cr4); | |
51 | + r = rsm_enter_protected_mode(ctxt, smstate->cr0, | |
52 | + smstate->cr3, smstate->cr4); | |
53 | + | |
54 | + if (r != X86EMUL_CONTINUE) | |
55 | + return r; | |
56 | + | |
57 | + ctxt->ops->set_int_shadow(ctxt, 0); | |
58 | + ctxt->interruptibility = (u8)smstate->int_shadow; | |
59 | + | |
60 | + return X86EMUL_CONTINUE; | |
61 | } | |
62 | ||
63 | #ifdef CONFIG_X86_64 | |
64 | @@ -2501,6 +2509,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, | |
65 | rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS); | |
66 | rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS); | |
67 | ||
68 | + ctxt->ops->set_int_shadow(ctxt, 0); | |
69 | + ctxt->interruptibility = (u8)smstate->int_shadow; | |
70 | + | |
71 | return X86EMUL_CONTINUE; | |
72 | } | |
73 | #endif | |
74 | diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h | |
75 | index d16b377be70b..5b881a3a5ed9 100644 | |
76 | --- a/arch/x86/kvm/kvm_emulate.h | |
77 | +++ b/arch/x86/kvm/kvm_emulate.h | |
78 | @@ -229,6 +229,7 @@ struct x86_emulate_ops { | |
79 | bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt); | |
80 | ||
81 | void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); | |
82 | + void (*set_int_shadow)(struct x86_emulate_ctxt *ctxt, u8 shadow); | |
83 | ||
84 | unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); | |
85 | void (*exiting_smm)(struct x86_emulate_ctxt *ctxt); | |
86 | @@ -499,7 +500,9 @@ struct kvm_smram_state_32 { | |
87 | u32 reserved1[62]; /* FE00 - FEF7 */ | |
88 | u32 smbase; /* FEF8 */ | |
89 | u32 smm_revision; /* FEFC */ | |
90 | - u32 reserved2[5]; /* FF00-FF13 */ | |
91 | + u32 reserved2[4]; /* FF00-FF0F*/ | |
92 | + /* int_shadow is KVM extension*/ | |
93 | + u32 int_shadow; /* FF10 */ | |
94 | /* CR4 is not present in Intel/AMD SMRAM image*/ | |
95 | u32 cr4; /* FF14 */ | |
96 | u32 reserved3[5]; /* FF18 */ | |
97 | @@ -571,13 +574,17 @@ struct kvm_smram_state_64 { | |
98 | struct kvm_smm_seg_state_64 idtr; /* FE80 (R/O) */ | |
99 | struct kvm_smm_seg_state_64 tr; /* FE90 (R/O) */ | |
100 | ||
101 | - /* I/O restart and auto halt restart are not implemented by KVM */ | |
102 | + /* | |
103 | + * I/O restart and auto halt restart are not implemented by KVM | |
104 | + * int_shadow is KVM's extension | |
105 | + */ | |
106 | + | |
107 | u64 io_restart_rip; /* FEA0 (R/O) */ | |
108 | u64 io_restart_rcx; /* FEA8 (R/O) */ | |
109 | u64 io_restart_rsi; /* FEB0 (R/O) */ | |
110 | u64 io_restart_rdi; /* FEB8 (R/O) */ | |
111 | u32 io_restart_dword; /* FEC0 (R/O) */ | |
112 | - u32 reserved1; /* FEC4 */ | |
113 | + u32 int_shadow; /* FEC4 (R/O) */ | |
114 | u8 io_instruction_restart; /* FEC8 (R/W) */ | |
115 | u8 auto_halt_restart; /* FEC9 (R/W) */ | |
116 | u8 reserved2[6]; /* FECA-FECF */ | |
117 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c | |
118 | index f40cd45b6a01..9afac97ea98c 100644 | |
119 | --- a/arch/x86/kvm/x86.c | |
120 | +++ b/arch/x86/kvm/x86.c | |
121 | @@ -7299,6 +7299,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) | |
122 | static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); | |
123 | } | |
124 | ||
125 | +static void emulator_set_int_shadow(struct x86_emulate_ctxt *ctxt, u8 shadow) | |
126 | +{ | |
127 | + static_call(kvm_x86_set_interrupt_shadow)(emul_to_vcpu(ctxt), shadow); | |
128 | +} | |
129 | + | |
130 | static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) | |
131 | { | |
132 | return emul_to_vcpu(ctxt)->arch.hflags; | |
133 | @@ -7368,6 +7373,7 @@ static const struct x86_emulate_ops emulate_ops = { | |
134 | .guest_has_fxsr = emulator_guest_has_fxsr, | |
135 | .guest_has_rdpid = emulator_guest_has_rdpid, | |
136 | .set_nmi_mask = emulator_set_nmi_mask, | |
137 | + .set_int_shadow = emulator_set_int_shadow, | |
138 | .get_hflags = emulator_get_hflags, | |
139 | .exiting_smm = emulator_exiting_smm, | |
140 | .leave_smm = emulator_leave_smm, | |
141 | @@ -9088,6 +9094,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat | |
142 | smram->cr4 = kvm_read_cr4(vcpu); | |
143 | smram->smm_revision = 0x00020000; | |
144 | smram->smbase = vcpu->arch.smbase; | |
145 | + | |
146 | + smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); | |
147 | } | |
148 | ||
149 | #ifdef CONFIG_X86_64 | |
150 | @@ -9136,6 +9144,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_stat | |
151 | enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS); | |
152 | enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS); | |
153 | enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS); | |
154 | + | |
155 | + smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); | |
156 | } | |
157 | #endif | |
158 | ||
159 | @@ -9172,6 +9182,8 @@ static void enter_smm(struct kvm_vcpu *vcpu) | |
160 | kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); | |
161 | kvm_rip_write(vcpu, 0x8000); | |
162 | ||
163 | + static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0); | |
164 | + | |
165 | cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); | |
166 | static_call(kvm_x86_set_cr0)(vcpu, cr0); | |
167 | vcpu->arch.cr0 = cr0; |