/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "helper.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

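/* In user-mode emulation there is no system state to virtualize, so the
   SVM helpers are compiled as empty stubs; the real implementations live
   in the #else branch below and are only built for system emulation. */
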
void helper_vmrun(int aflag, int next_eip_addend)
{
}

void helper_vmmcall(void)
{
}

void helper_vmload(int aflag)
{
}

void helper_vmsave(int aflag)
{
}

void helper_stgi(void)
{
}

void helper_clgi(void)
{
}

void helper_skinit(void)
{
}

void helper_invlpga(int aflag)
{
}

void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

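/* The VMCB stores segment attributes in a packed 12-bit "attrib" field:
   its bits 0-7 correspond to bits 8-15 of QEMU's SegmentCache flags and
   its bits 8-11 to flags bits 20-23.  The two helpers below convert
   between the packed VMCB layout and the SegmentCache encoding when
   segments are saved to or loaded from guest memory. */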
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUX86State *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

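/* VMRUN: save the host state into the host save area (vm_hsave), cache the
   intercept bitmaps in the CPU state so they can be checked without touching
   the VMCB again, load the guest state from the VMCB pointed to by rAX, and
   finally inject any event pending in the VMCB's event_inj field. */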
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

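    /* event_inj layout, as reflected by the SVM_EVTINJ_* masks used below:
       the low bits hold the vector, SVM_EVTINJ_TYPE_MASK selects the event
       type (external interrupt, NMI, exception or software interrupt),
       SVM_EVTINJ_VALID_ERR flags a valid error code in event_inj_err and
       SVM_EVTINJ_VALID marks the whole field as pending. */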
    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

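/* VMLOAD/VMSAVE transfer the "additional" guest state that VMRUN/#VMEXIT do
   not switch: FS, GS, TR and LDTR including their hidden descriptor state,
   KernelGSBase and the STAR/LSTAR/CSTAR/SFMASK syscall MSRs (64-bit builds
   only), plus the SYSENTER MSRs.  The VMCB address is taken from rAX. */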
void helper_vmload(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

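            /* Each MSR is covered by two consecutive bits in the MSR
               permission map, one for reads and one for writes.  The ECX
               ranges below select the region of the map; t1 becomes the
               byte offset into the map and t0 the bit position within
               that byte, so ((1 << param) << t0) picks the bit to test. */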
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, param);
    env = saved_env;
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
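        /* The I/O permission map holds one intercept bit per port.  The
           size bits of param (bits 4-6, as passed by the translator) yield
           the access width in bytes, so the mask below covers one bit for
           each port touched by the access and the test fires if any of
           them is intercepted. */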
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

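/* #VMEXIT: write the guest state and the exit code/exit information back
   into the VMCB, reload the host state that VMRUN stashed in vm_hsave,
   drop back to CPL 0, clear GIF and restart execution in the host context
   via cpu_loop_exit(). */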
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
    env = nenv;
    helper_vmexit(exit_code, exit_info_1);
}

#endif