/* Copyright 2008 IBM Corporation
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"

#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <sys/sysctl.h>

#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"
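
/*
 * Mirror the guest APIC's task priority into the vCPU and program the
 * VMCS TPR threshold, so that lowering TPR below the priority of a
 * pending interrupt triggers a TPR-below-threshold exit.
 */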
void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}

static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK 0xff
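
/*
 * Forward a guest port-I/O access to QEMU's I/O address space,
 * transferring 'count' elements of 'size' bytes through 'buffer';
 * 'direction' is 0 for IN (device to buffer), 1 for OUT.
 */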
void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size, direction);
        ptr += size;
    }
}
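
/*
 * Decide whether an EPT violation must be handled by the MMIO
 * instruction emulator. Instruction fetches and faults with no
 * read/write component never are; a write to a dirty-logged slot just
 * updates dirty state and restores write access; accesses that miss
 * RAM (or are ROMD reads) do need emulation.
 */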
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                          HV_MEMORY_READ | HV_MEMORY_WRITE);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    if (!slot) {
        return true;
    }
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return true;
    }
    return false;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    g_free(env->hvf_mmio_buf);
}
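
/*
 * Cache the host TSC and APIC bus frequencies, queried via sysctl;
 * both must be known before the vmware-cpuid-freq leaves can be
 * exposed to the guest.
 */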
static void init_tsc_freq(CPUX86State *env)
{
    size_t length;
    uint64_t tsc_freq;

    if (env->tsc_khz != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("machdep.tsc.frequency", &tsc_freq, &length, NULL, 0)) {
        return;
    }
    env->tsc_khz = tsc_freq / 1000;  /* Hz to KHz */
}

static void init_apic_bus_freq(CPUX86State *env)
{
    size_t length;
    uint64_t bus_freq;

    if (env->apic_bus_freq != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("hw.busfrequency", &bus_freq, &length, NULL, 0)) {
        return;
    }
    env->apic_bus_freq = bus_freq;
}

static inline bool tsc_is_known(CPUX86State *env)
{
    return env->tsc_khz != 0;
}

static inline bool apic_bus_freq_is_known(CPUX86State *env)
{
    return env->apic_bus_freq != 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
}

int hvf_arch_init(void)
{
    return 0;
}
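
/*
 * One-time vCPU setup: allocate the MMIO emulation buffer, read the
 * VMX capability MSRs through Hypervisor.framework, program the VMCS
 * execution controls derived from them, and mark the syscall/SYSENTER
 * MSRs as handled natively by the hardware rather than trapped.
 */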
int hvf_arch_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;

    init_emu();
    init_decoder();

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_mmio_buf = g_new(char, 4096);

    if (x86cpu->vmware_cpuid_freq) {
        init_tsc_freq(env);
        init_apic_bus_freq(env);

        if (!tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
            error_report("vmware-cpuid-freq: feature couldn't be enabled");
        }
    }

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
                               &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
                               &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
                               &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
                               &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
                   VMCS_PIN_BASED_CTLS_EXTINT |
                   VMCS_PIN_BASED_CTLS_NMI |
                   VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
                   VMCS_PRI_PROC_BASED_CTLS_HLT |
                   VMCS_PRI_PROC_BASED_CTLS_MWAIT |
                   VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
                   VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
    wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
                   VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));

    wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.xsave_buf_len = 4096;
    x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);

    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <=
           x86cpu->env.xsave_buf_len);

    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}
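
/*
 * Capture pending-event state after a VM exit: translate the VMCS
 * IDT-vectoring info into the env exception/interrupt fields so that
 * interrupted event delivery is re-injected on the next entry, and
 * mirror NMI and interrupt-shadow blocking into hflags2/hflags.
 */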
static void hvf_store_events(CPUState *cpu, uint32_t ins_len,
                             uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
         VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
        (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}

static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                              uint32_t *eax, uint32_t *ebx,
                              uint32_t *ecx, uint32_t *edx)
{
    /*
     * A wrapper that extends cpu_x86_cpuid with the 0x40000000 and
     * 0x40000010 leaves; leaves 0x40000001-0x4000000F are filled with
     * zeros. Provides vmware-cpuid-freq support to hvf.
     *
     * Note: leaf 0x40000000 does not expose HVF,
     * leaving the hypervisor signature empty.
     */

    if (index < 0x40000000 || index > 0x40000010 ||
        !tsc_is_known(env) || !apic_bus_freq_is_known(env)) {

        cpu_x86_cpuid(env, index, count, eax, ebx, ecx, edx);
        return;
    }

    switch (index) {
    case 0x40000000:
        *eax = 0x40000010;    /* Max available cpuid leaf */
        *ebx = 0;             /* Leave signature empty */
        *ecx = 0;
        *edx = 0;
        break;
    case 0x40000010:
        *eax = env->tsc_khz;
        *ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
        *ecx = 0;
        *edx = 0;
        break;
    default:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
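
/*
 * Run loop for a vCPU: flush dirty register state to HVF, enter the
 * guest with hv_vcpu_run(), then read the exit reason back from the
 * VMCS and handle it. Loops until an exit must be serviced by the
 * QEMU main loop (ret != 0).
 */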
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->vcpu_dirty) {
            hvf_put_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        qemu_mutex_unlock_iothread();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            qemu_mutex_lock_iothread();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run(cpu->hvf->fd);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);

        uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->hvf->fd, HV_X86_RIP);
        env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);

        qemu_mutex_lock_iothread();

        update_apic_tpr(cpu);
        current_cpu = cpu;
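
        /*
         * Dispatch on the exit reason. Each case either emulates the
         * exiting instruction and lets the loop re-enter the guest
         * (ret == 0) or stops the loop by setting a non-zero ret.
         */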
        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                  (env->eflags & IF_MASK)) &&
                !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;
                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                env->eip += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);

            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);

            if (rax == 1) {
                /* CPUID1.ecx.OSXSAVE needs to know CR4 */
                env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
            }
            hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->hvf->fd, HV_X86_RAX, rax);
            wreg(cpu->hvf->fd, HV_X86_RBX, rbx);
            wreg(cpu->hvf->fd, HV_X86_RCX, rcx);
            wreg(cpu->hvf->fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
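        /*
         * XSETBV loads XCR0 from EDX:EAX; only XCR0 itself (ECX == 0)
         * is emulated, and bit 0 (x87 state) is forced on, as the
         * architecture requires.
         */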
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(cpu);
            } else {
                simulate_wrmsr(cpu);
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
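        /*
         * CR access: the exit qualification carries the CR number in
         * bits 3:0 and the source/destination register in bits 11:8.
         */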
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            RIP(env) += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->hvf->fd, HV_X86_RAX, 0);
            wreg(cpu->hvf->fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}