/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

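/* SVM is a system-level virtualization feature; under user-mode
   emulation there is no guest/host distinction, so all of these
   helpers are stubbed out as no-ops. */
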
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

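/*
 * The VMCB packs segment attributes into 12 bits (AMD APM vol. 2):
 * bits 0-7 hold the access-rights byte and bits 8-11 the
 * AVL/L/DB/G nibble.  QEMU's SegmentCache keeps the same fields at
 * flag bits 8-15 and 20-23, so the two helpers below just shift
 * between the two layouts.
 */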
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

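/* Like svm_load_seg(), but additionally pushes the result through
   cpu_x86_load_seg_cache() so that dependent CPU state (the hflags
   tracking code size, CPL and so on) is recomputed. */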
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

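/*
 * VMRUN: save the host state into the hsave page, load the guest
 * state and intercept controls from the VMCB pointed to by rAX,
 * then inject a pending event from EVENTINJ, if any.
 */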
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

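    /* aflag encodes the effective address size: 2 means 64-bit, so
       the VMCB address comes from the full RAX; otherwise EAX is
       zero-extended. */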
    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

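    /* EVENTINJ layout (AMD APM vol. 2): bits 0-7 vector, bits 8-10
       type, bit 11 error-code-valid, bit 31 valid. */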
    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

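/* VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT leave alone:
   the hidden FS, GS, TR and LDTR segment state plus the
   SYSCALL/SYSENTER and (on x86-64) KernelGSBase MSRs. */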
void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb,
                                                 save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

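/* INVLPGA invalidates the TLB mapping for the virtual address in
   rAX; ECX carries the ASID, which this implementation does not
   use yet (see the XXX below). */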
void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

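            /* The MSR permission map holds two bits per MSR (read
               and write intercept) in three 2K regions covering the
               MSR ranges 0-0x1fff, 0xc0000000-0xc0001fff and
               0xc0010000-0xc0011fff; t1 is the byte offset and t0
               the bit offset of the pair, and param selects read (0)
               or write (1). */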
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

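/* The I/O permission map has one bit per port byte; param carries
   the SVM IOIO exit-info encoding, whose bits 4-6 give the access
   size in bytes, so an access intercepts if any of the size
   consecutive bits starting at the port's bit is set. */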
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

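/*
 * #VMEXIT: write the guest state and exit information back to the
 * VMCB, then reload the host state from the hsave page and return
 * control to the host at the instruction after VMRUN.
 */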
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif