/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
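
/* The VMCB stores segment descriptors in AMD's packed "attrib" format:
   descriptor bits 40-47 in attrib bits 0-7 and descriptor bits 52-55 in
   attrib bits 8-11.  svm_save_seg() and svm_load_seg() below convert between
   that layout and the flags word QEMU keeps in SegmentCache, where the same
   attribute bits live at positions 8-15 and 20-23. */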
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = ENV_GET_CPU(env);
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
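
    /* V_INTR_MASKING means the guest's EFLAGS.IF only gates virtual
       interrupts; note the virtual TPR and remember in HF2_HIF_MASK whether
       the host had interrupts enabled, so real interrupt delivery can be
       decided while the guest runs. */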
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(cs->as,
                                   env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
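        /* EVENTINJ encoding: bits 0-7 vector, bits 8-10 type, bit 11 flags a
           valid error code (delivered via event_inj_err), bit 31 marks the
           injection as valid; the masks used below follow that layout. */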
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;
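
            /* The MSR permission map holds two bits per MSR (read intercept,
               then write intercept) in three 2K regions covering MSRs
               0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.
               t1 becomes the byte offset into the map, t0 the bit offset;
               param selects the read (0) or write (1) bit. */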
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
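
        /* The I/O permission map holds one bit per port.  A multi-byte access
           must test one bit per byte, so 'mask' spans the access size encoded
           in bits 4-6 of param (the IOIO exit information format). */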
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip is saved in exit_info_2 */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
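
    /* Write the current virtual interrupt state (V_TPR and any still-pending
       V_IRQ) back into the VMCB so the hypervisor sees it on this #VMEXIT. */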
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif