From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Maxim Levitsky <mlevitsk@redhat.com>
Date: Wed, 3 Aug 2022 18:50:07 +0300
Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 32 bit smram
 load/restore

Use kvm_smram_state_32 struct to save/restore 32 bit SMM state
(used when X86_FEATURE_LM is not present in the guest CPUID).

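As background (not part of this patch): the conversion relies on
struct kvm_smm_seg_state_32 and struct kvm_smram_state_32, which are
introduced earlier in this patch series. The sketch below only lists the
fields this patch actually touches; the real definitions also carry
reserved/padding members that pin each field to its architectural SMRAM
offset, and the member order and exact types shown here are not
authoritative (u32 is the kernel typedef from <linux/types.h>).

  /* Abbreviated sketch only -- not the actual definition. */
  struct kvm_smm_seg_state_32 {
          u32 flags;      /* old code read this at offset + 0 */
          u32 limit;      /* ... offset + 4 */
          u32 base;       /* ... offset + 8 */
  };

  struct kvm_smram_state_32 {
          struct kvm_smm_seg_state_32 es, cs, ss, ds, fs, gs, ldtr, tr;
          u32 es_sel, cs_sel, ss_sel, ds_sel, fs_sel, gs_sel, ldtr_sel, tr_sel;
          struct { u32 base; u32 limit; } gdtr, idtr; /* descriptor tables */
          u32 gprs[8];          /* filled from kvm_register_read_raw(vcpu, i) */
          u32 eip, eflags;
          u32 cr0, cr3, cr4;
          u32 dr6, dr7;
          u32 smbase;
          u32 smm_revision;     /* written as 0x00020000 */
  };
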
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/emulate.c | 81 +++++++++++++++---------------------------
 arch/x86/kvm/x86.c     | 75 +++++++++++++++++---------------
 2 files changed, 60 insertions(+), 96 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 05c4d9dfbced..47bb09f02304 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2359,25 +2359,17 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
         desc->type = (flags >> 8) & 15;
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt,
+                            const struct kvm_smm_seg_state_32 *state,
+                            u16 selector,
                            int n)
 {
         struct desc_struct desc;
-        int offset;
-        u16 selector;
-
-        selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
-
-        if (n < 3)
-                offset = 0x7f84 + n * 12;
-        else
-                offset = 0x7f2c + (n - 3) * 12;
 
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
-        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
+        set_desc_base(&desc, state->base);
+        set_desc_limit(&desc, state->limit);
+        rsm_set_desc_flags(&desc, state->flags);
         ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
-        return X86EMUL_CONTINUE;
 }
 
 #ifdef CONFIG_X86_64
@@ -2448,63 +2440,46 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 }
 
 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
-                             const char *smstate)
+                             const struct kvm_smram_state_32 *smstate)
 {
-        struct desc_struct desc;
         struct desc_ptr dt;
-        u16 selector;
-        u32 val, cr0, cr3, cr4;
         int i;
 
-        cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
-        cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
-        ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
-        ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
+        ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
+        ctxt->_eip = smstate->eip;
 
         for (i = 0; i < 8; i++)
-                *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
-
-        val = GET_SMSTATE(u32, smstate, 0x7fcc);
+                *reg_write(ctxt, i) = smstate->gprs[i];
 
-        if (ctxt->ops->set_dr(ctxt, 6, val))
+        if (ctxt->ops->set_dr(ctxt, 6, smstate->dr6))
                 return X86EMUL_UNHANDLEABLE;
-
-        val = GET_SMSTATE(u32, smstate, 0x7fc8);
-
-        if (ctxt->ops->set_dr(ctxt, 7, val))
+        if (ctxt->ops->set_dr(ctxt, 7, smstate->dr7))
                 return X86EMUL_UNHANDLEABLE;
 
-        selector = GET_SMSTATE(u32, smstate, 0x7fc4);
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
-        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
-        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
+        rsm_load_seg_32(ctxt, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR);
+        rsm_load_seg_32(ctxt, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);
 
-        selector = GET_SMSTATE(u32, smstate, 0x7fc0);
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
-        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
-        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
 
-        dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
-        dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
+        dt.address = smstate->gdtr.base;
+        dt.size = smstate->gdtr.limit;
         ctxt->ops->set_gdt(ctxt, &dt);
 
-        dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
-        dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
+        dt.address = smstate->idtr.base;
+        dt.size = smstate->idtr.limit;
         ctxt->ops->set_idt(ctxt, &dt);
 
-        for (i = 0; i < 6; i++) {
-                int r = rsm_load_seg_32(ctxt, smstate, i);
-                if (r != X86EMUL_CONTINUE)
-                        return r;
-        }
+        rsm_load_seg_32(ctxt, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
+        rsm_load_seg_32(ctxt, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
+        rsm_load_seg_32(ctxt, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS);
 
-        cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
+        rsm_load_seg_32(ctxt, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS);
+        rsm_load_seg_32(ctxt, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS);
+        rsm_load_seg_32(ctxt, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);
 
-        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
+        ctxt->ops->set_smbase(ctxt, smstate->smbase);
 
-        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
+        return rsm_enter_protected_mode(ctxt, smstate->cr0,
+                                        smstate->cr3, smstate->cr4);
 }
 
 #ifdef CONFIG_X86_64
@@ -2651,7 +2626,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
                 ret = rsm_load_state_64(ctxt, (const char *)&smram);
         else
 #endif
-                ret = rsm_load_state_32(ctxt, (const char *)&smram);
+                ret = rsm_load_state_32(ctxt, &smram.smram32);
 
         if (ret != X86EMUL_CONTINUE)
                 goto emulate_shutdown;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e48e7b7b8dde..eb029c131d0d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9618,22 +9618,18 @@ static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
         return flags;
 }
 
-static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
+static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
+                                  struct kvm_smm_seg_state_32 *state,
+                                  u32 *selector,
+                                  int n)
 {
         struct kvm_segment seg;
-        int offset;
 
         kvm_get_segment(vcpu, &seg, n);
-        put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
-
-        if (n < 3)
-                offset = 0x7f84 + n * 12;
-        else
-                offset = 0x7f2c + (n - 3) * 12;
-
-        put_smstate(u32, buf, offset + 8, seg.base);
-        put_smstate(u32, buf, offset + 4, seg.limit);
-        put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
+        *selector = seg.selector;
+        state->base = seg.base;
+        state->limit = seg.limit;
+        state->flags = enter_smm_get_segment_flags(&seg);
 }
 
 #ifdef CONFIG_X86_64
@@ -9654,54 +9650,47 @@ static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 }
 #endif
 
-static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
+static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_state_32 *smram)
 {
         struct desc_ptr dt;
-        struct kvm_segment seg;
         unsigned long val;
         int i;
 
-        put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
-        put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
-        put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
-        put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
+        smram->cr0 = kvm_read_cr0(vcpu);
+        smram->cr3 = kvm_read_cr3(vcpu);
+        smram->eflags = kvm_get_rflags(vcpu);
+        smram->eip = kvm_rip_read(vcpu);
 
         for (i = 0; i < 8; i++)
-                put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
+                smram->gprs[i] = kvm_register_read_raw(vcpu, i);
 
         kvm_get_dr(vcpu, 6, &val);
-        put_smstate(u32, buf, 0x7fcc, (u32)val);
+        smram->dr6 = (u32)val;
         kvm_get_dr(vcpu, 7, &val);
-        put_smstate(u32, buf, 0x7fc8, (u32)val);
+        smram->dr7 = (u32)val;
 
-        kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-        put_smstate(u32, buf, 0x7fc4, seg.selector);
-        put_smstate(u32, buf, 0x7f64, seg.base);
-        put_smstate(u32, buf, 0x7f60, seg.limit);
-        put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
-
-        kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-        put_smstate(u32, buf, 0x7fc0, seg.selector);
-        put_smstate(u32, buf, 0x7f80, seg.base);
-        put_smstate(u32, buf, 0x7f7c, seg.limit);
-        put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
+        enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
+        enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
 
         static_call(kvm_x86_get_gdt)(vcpu, &dt);
-        put_smstate(u32, buf, 0x7f74, dt.address);
-        put_smstate(u32, buf, 0x7f70, dt.size);
+        smram->gdtr.base = dt.address;
+        smram->gdtr.limit = dt.size;
 
         static_call(kvm_x86_get_idt)(vcpu, &dt);
-        put_smstate(u32, buf, 0x7f58, dt.address);
-        put_smstate(u32, buf, 0x7f54, dt.size);
+        smram->idtr.base = dt.address;
+        smram->idtr.limit = dt.size;
 
-        for (i = 0; i < 6; i++)
-                enter_smm_save_seg_32(vcpu, buf, i);
+        enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES);
+        enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS);
+        enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS);
 
-        put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
+        enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS);
+        enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS);
+        enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);
 
-        /* revision id */
-        put_smstate(u32, buf, 0x7efc, 0x00020000);
-        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
+        smram->cr4 = kvm_read_cr4(vcpu);
+        smram->smm_revision = 0x00020000;
+        smram->smbase = vcpu->arch.smbase;
 }
 
 #ifdef CONFIG_X86_64
@@ -9772,7 +9761,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
                 enter_smm_save_state_64(vcpu, (char *)&smram);
         else
 #endif
-                enter_smm_save_state_32(vcpu, (char *)&smram);
+                enter_smm_save_state_32(vcpu, &smram.smram32);
 
         /*
          * Give enter_smm() a chance to make ISA-specific changes to the vCPU