2 * QEMU Windows Hypervisor Platform accelerator (WHPX)
4 * Copyright Microsoft Corp. 2017
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
11 #include "qemu/osdep.h"
13 #include "exec/address-spaces.h"
14 #include "exec/ioport.h"
15 #include "qemu-common.h"
16 #include "sysemu/accel.h"
17 #include "sysemu/whpx.h"
18 #include "sysemu/cpus.h"
19 #include "sysemu/runstate.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/error-report.h"
22 #include "qapi/error.h"
23 #include "migration/blocker.h"
24 #include "whp-dispatch.h"
26 #include <WinHvPlatform.h>
27 #include <WinHvEmulation.h>
31 WHV_PARTITION_HANDLE partition
;
34 static const WHV_REGISTER_NAME whpx_register_names
[] = {
36 /* X64 General purpose registers */
56 /* X64 Segment registers */
66 /* X64 Table registers */
70 /* X64 Control Registers */
77 /* X64 Debug Registers */
87 /* X64 Floating Point and Vector Registers */
104 WHvX64RegisterFpMmx0
,
105 WHvX64RegisterFpMmx1
,
106 WHvX64RegisterFpMmx2
,
107 WHvX64RegisterFpMmx3
,
108 WHvX64RegisterFpMmx4
,
109 WHvX64RegisterFpMmx5
,
110 WHvX64RegisterFpMmx6
,
111 WHvX64RegisterFpMmx7
,
112 WHvX64RegisterFpControlStatus
,
113 WHvX64RegisterXmmControlStatus
,
119 WHvX64RegisterKernelGsBase
,
121 WHvX64RegisterApicBase
,
122 /* WHvX64RegisterPat, */
123 WHvX64RegisterSysenterCs
,
124 WHvX64RegisterSysenterEip
,
125 WHvX64RegisterSysenterEsp
,
130 WHvX64RegisterSfmask
,
133 /* Interrupt / Event Registers */
135 * WHvRegisterPendingInterruption,
136 * WHvRegisterInterruptState,
137 * WHvRegisterPendingEvent0,
138 * WHvRegisterPendingEvent1
139 * WHvX64RegisterDeliverabilityNotifications,
143 struct whpx_register_set
{
144 WHV_REGISTER_VALUE values
[RTL_NUMBER_OF(whpx_register_names
)];
148 WHV_EMULATOR_HANDLE emulator
;
149 bool window_registered
;
153 bool interruption_pending
;
155 /* Must be the last field as it may have a tail */
156 WHV_RUN_VP_EXIT_CONTEXT exit_ctx
;
159 static bool whpx_allowed
;
160 static bool whp_dispatch_initialized
;
161 static HMODULE hWinHvPlatform
, hWinHvEmulation
;
163 struct whpx_state whpx_global
;
164 struct WHPDispatch whp_dispatch
;
171 static struct whpx_vcpu
*get_whpx_vcpu(CPUState
*cpu
)
173 return (struct whpx_vcpu
*)cpu
->hax_vcpu
;
176 static WHV_X64_SEGMENT_REGISTER
whpx_seg_q2h(const SegmentCache
*qs
, int v86
,
179 WHV_X64_SEGMENT_REGISTER hs
;
180 unsigned flags
= qs
->flags
;
183 hs
.Limit
= qs
->limit
;
184 hs
.Selector
= qs
->selector
;
190 hs
.DescriptorPrivilegeLevel
= 3;
191 hs
.NonSystemSegment
= 1;
194 hs
.Attributes
= (flags
>> DESC_TYPE_SHIFT
);
197 /* hs.Base &= 0xfffff; */
204 static SegmentCache
whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER
*hs
)
209 qs
.limit
= hs
->Limit
;
210 qs
.selector
= hs
->Selector
;
212 qs
.flags
= ((uint32_t)hs
->Attributes
) << DESC_TYPE_SHIFT
;
217 static void whpx_set_registers(CPUState
*cpu
)
219 struct whpx_state
*whpx
= &whpx_global
;
220 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
221 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
222 X86CPU
*x86_cpu
= X86_CPU(cpu
);
223 struct whpx_register_set vcxt
;
230 assert(cpu_is_stopped(cpu
) || qemu_cpu_is_self(cpu
));
232 memset(&vcxt
, 0, sizeof(struct whpx_register_set
));
234 v86
= (env
->eflags
& VM_MASK
);
235 r86
= !(env
->cr
[0] & CR0_PE_MASK
);
237 vcpu
->tpr
= cpu_get_apic_tpr(x86_cpu
->apic_state
);
238 vcpu
->apic_base
= cpu_get_apic_base(x86_cpu
->apic_state
);
242 /* Indexes for first 16 registers match between HV and QEMU definitions */
244 for (idx
= 0; idx
< CPU_NB_REGS
; idx
+= 1) {
245 vcxt
.values
[idx
].Reg64
= (uint64_t)env
->regs
[idx
];
249 /* Same goes for RIP and RFLAGS */
250 assert(whpx_register_names
[idx
] == WHvX64RegisterRip
);
251 vcxt
.values
[idx
++].Reg64
= env
->eip
;
253 assert(whpx_register_names
[idx
] == WHvX64RegisterRflags
);
254 vcxt
.values
[idx
++].Reg64
= env
->eflags
;
256 /* Translate 6+4 segment registers. HV and QEMU order matches */
257 assert(idx
== WHvX64RegisterEs
);
258 for (i
= 0; i
< 6; i
+= 1, idx
+= 1) {
259 vcxt
.values
[idx
].Segment
= whpx_seg_q2h(&env
->segs
[i
], v86
, r86
);
262 assert(idx
== WHvX64RegisterLdtr
);
263 vcxt
.values
[idx
++].Segment
= whpx_seg_q2h(&env
->ldt
, 0, 0);
265 assert(idx
== WHvX64RegisterTr
);
266 vcxt
.values
[idx
++].Segment
= whpx_seg_q2h(&env
->tr
, 0, 0);
268 assert(idx
== WHvX64RegisterIdtr
);
269 vcxt
.values
[idx
].Table
.Base
= env
->idt
.base
;
270 vcxt
.values
[idx
].Table
.Limit
= env
->idt
.limit
;
273 assert(idx
== WHvX64RegisterGdtr
);
274 vcxt
.values
[idx
].Table
.Base
= env
->gdt
.base
;
275 vcxt
.values
[idx
].Table
.Limit
= env
->gdt
.limit
;
278 /* CR0, 2, 3, 4, 8 */
279 assert(whpx_register_names
[idx
] == WHvX64RegisterCr0
);
280 vcxt
.values
[idx
++].Reg64
= env
->cr
[0];
281 assert(whpx_register_names
[idx
] == WHvX64RegisterCr2
);
282 vcxt
.values
[idx
++].Reg64
= env
->cr
[2];
283 assert(whpx_register_names
[idx
] == WHvX64RegisterCr3
);
284 vcxt
.values
[idx
++].Reg64
= env
->cr
[3];
285 assert(whpx_register_names
[idx
] == WHvX64RegisterCr4
);
286 vcxt
.values
[idx
++].Reg64
= env
->cr
[4];
287 assert(whpx_register_names
[idx
] == WHvX64RegisterCr8
);
288 vcxt
.values
[idx
++].Reg64
= vcpu
->tpr
;
290 /* 8 Debug Registers - Skipped */
292 /* 16 XMM registers */
293 assert(whpx_register_names
[idx
] == WHvX64RegisterXmm0
);
295 for (i
= 0; i
< sizeof(env
->xmm_regs
) / sizeof(ZMMReg
); i
+= 1, idx
+= 1) {
296 vcxt
.values
[idx
].Reg128
.Low64
= env
->xmm_regs
[i
].ZMM_Q(0);
297 vcxt
.values
[idx
].Reg128
.High64
= env
->xmm_regs
[i
].ZMM_Q(1);
302 assert(whpx_register_names
[idx
] == WHvX64RegisterFpMmx0
);
303 for (i
= 0; i
< 8; i
+= 1, idx
+= 1) {
304 vcxt
.values
[idx
].Fp
.AsUINT128
.Low64
= env
->fpregs
[i
].mmx
.MMX_Q(0);
305 /* vcxt.values[idx].Fp.AsUINT128.High64 =
306 env->fpregs[i].mmx.MMX_Q(1);
310 /* FP control status register */
311 assert(whpx_register_names
[idx
] == WHvX64RegisterFpControlStatus
);
312 vcxt
.values
[idx
].FpControlStatus
.FpControl
= env
->fpuc
;
313 vcxt
.values
[idx
].FpControlStatus
.FpStatus
=
314 (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
315 vcxt
.values
[idx
].FpControlStatus
.FpTag
= 0;
316 for (i
= 0; i
< 8; ++i
) {
317 vcxt
.values
[idx
].FpControlStatus
.FpTag
|= (!env
->fptags
[i
]) << i
;
319 vcxt
.values
[idx
].FpControlStatus
.Reserved
= 0;
320 vcxt
.values
[idx
].FpControlStatus
.LastFpOp
= env
->fpop
;
321 vcxt
.values
[idx
].FpControlStatus
.LastFpRip
= env
->fpip
;
324 /* XMM control status register */
325 assert(whpx_register_names
[idx
] == WHvX64RegisterXmmControlStatus
);
326 vcxt
.values
[idx
].XmmControlStatus
.LastFpRdp
= 0;
327 vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControl
= env
->mxcsr
;
328 vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControlMask
= 0x0000ffff;
332 assert(whpx_register_names
[idx
] == WHvX64RegisterTsc
);
333 vcxt
.values
[idx
++].Reg64
= env
->tsc
;
334 assert(whpx_register_names
[idx
] == WHvX64RegisterEfer
);
335 vcxt
.values
[idx
++].Reg64
= env
->efer
;
337 assert(whpx_register_names
[idx
] == WHvX64RegisterKernelGsBase
);
338 vcxt
.values
[idx
++].Reg64
= env
->kernelgsbase
;
341 assert(whpx_register_names
[idx
] == WHvX64RegisterApicBase
);
342 vcxt
.values
[idx
++].Reg64
= vcpu
->apic_base
;
344 /* WHvX64RegisterPat - Skipped */
346 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterCs
);
347 vcxt
.values
[idx
++].Reg64
= env
->sysenter_cs
;
348 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEip
);
349 vcxt
.values
[idx
++].Reg64
= env
->sysenter_eip
;
350 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEsp
);
351 vcxt
.values
[idx
++].Reg64
= env
->sysenter_esp
;
352 assert(whpx_register_names
[idx
] == WHvX64RegisterStar
);
353 vcxt
.values
[idx
++].Reg64
= env
->star
;
355 assert(whpx_register_names
[idx
] == WHvX64RegisterLstar
);
356 vcxt
.values
[idx
++].Reg64
= env
->lstar
;
357 assert(whpx_register_names
[idx
] == WHvX64RegisterCstar
);
358 vcxt
.values
[idx
++].Reg64
= env
->cstar
;
359 assert(whpx_register_names
[idx
] == WHvX64RegisterSfmask
);
360 vcxt
.values
[idx
++].Reg64
= env
->fmask
;
363 /* Interrupt / Event Registers - Skipped */
365 assert(idx
== RTL_NUMBER_OF(whpx_register_names
));
367 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
368 whpx
->partition
, cpu
->cpu_index
,
370 RTL_NUMBER_OF(whpx_register_names
),
374 error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
381 static void whpx_get_registers(CPUState
*cpu
)
383 struct whpx_state
*whpx
= &whpx_global
;
384 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
385 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
386 X86CPU
*x86_cpu
= X86_CPU(cpu
);
387 struct whpx_register_set vcxt
;
388 uint64_t tpr
, apic_base
;
394 assert(cpu_is_stopped(cpu
) || qemu_cpu_is_self(cpu
));
396 hr
= whp_dispatch
.WHvGetVirtualProcessorRegisters(
397 whpx
->partition
, cpu
->cpu_index
,
399 RTL_NUMBER_OF(whpx_register_names
),
402 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
408 /* Indexes for first 16 registers match between HV and QEMU definitions */
410 for (idx
= 0; idx
< CPU_NB_REGS
; idx
+= 1) {
411 env
->regs
[idx
] = vcxt
.values
[idx
].Reg64
;
415 /* Same goes for RIP and RFLAGS */
416 assert(whpx_register_names
[idx
] == WHvX64RegisterRip
);
417 env
->eip
= vcxt
.values
[idx
++].Reg64
;
418 assert(whpx_register_names
[idx
] == WHvX64RegisterRflags
);
419 env
->eflags
= vcxt
.values
[idx
++].Reg64
;
421 /* Translate 6+4 segment registers. HV and QEMU order matches */
422 assert(idx
== WHvX64RegisterEs
);
423 for (i
= 0; i
< 6; i
+= 1, idx
+= 1) {
424 env
->segs
[i
] = whpx_seg_h2q(&vcxt
.values
[idx
].Segment
);
427 assert(idx
== WHvX64RegisterLdtr
);
428 env
->ldt
= whpx_seg_h2q(&vcxt
.values
[idx
++].Segment
);
429 assert(idx
== WHvX64RegisterTr
);
430 env
->tr
= whpx_seg_h2q(&vcxt
.values
[idx
++].Segment
);
431 assert(idx
== WHvX64RegisterIdtr
);
432 env
->idt
.base
= vcxt
.values
[idx
].Table
.Base
;
433 env
->idt
.limit
= vcxt
.values
[idx
].Table
.Limit
;
435 assert(idx
== WHvX64RegisterGdtr
);
436 env
->gdt
.base
= vcxt
.values
[idx
].Table
.Base
;
437 env
->gdt
.limit
= vcxt
.values
[idx
].Table
.Limit
;
440 /* CR0, 2, 3, 4, 8 */
441 assert(whpx_register_names
[idx
] == WHvX64RegisterCr0
);
442 env
->cr
[0] = vcxt
.values
[idx
++].Reg64
;
443 assert(whpx_register_names
[idx
] == WHvX64RegisterCr2
);
444 env
->cr
[2] = vcxt
.values
[idx
++].Reg64
;
445 assert(whpx_register_names
[idx
] == WHvX64RegisterCr3
);
446 env
->cr
[3] = vcxt
.values
[idx
++].Reg64
;
447 assert(whpx_register_names
[idx
] == WHvX64RegisterCr4
);
448 env
->cr
[4] = vcxt
.values
[idx
++].Reg64
;
449 assert(whpx_register_names
[idx
] == WHvX64RegisterCr8
);
450 tpr
= vcxt
.values
[idx
++].Reg64
;
451 if (tpr
!= vcpu
->tpr
) {
453 cpu_set_apic_tpr(x86_cpu
->apic_state
, tpr
);
456 /* 8 Debug Registers - Skipped */
458 /* 16 XMM registers */
459 assert(whpx_register_names
[idx
] == WHvX64RegisterXmm0
);
461 for (i
= 0; i
< sizeof(env
->xmm_regs
) / sizeof(ZMMReg
); i
+= 1, idx
+= 1) {
462 env
->xmm_regs
[i
].ZMM_Q(0) = vcxt
.values
[idx
].Reg128
.Low64
;
463 env
->xmm_regs
[i
].ZMM_Q(1) = vcxt
.values
[idx
].Reg128
.High64
;
468 assert(whpx_register_names
[idx
] == WHvX64RegisterFpMmx0
);
469 for (i
= 0; i
< 8; i
+= 1, idx
+= 1) {
470 env
->fpregs
[i
].mmx
.MMX_Q(0) = vcxt
.values
[idx
].Fp
.AsUINT128
.Low64
;
471 /* env->fpregs[i].mmx.MMX_Q(1) =
472 vcxt.values[idx].Fp.AsUINT128.High64;
476 /* FP control status register */
477 assert(whpx_register_names
[idx
] == WHvX64RegisterFpControlStatus
);
478 env
->fpuc
= vcxt
.values
[idx
].FpControlStatus
.FpControl
;
479 env
->fpstt
= (vcxt
.values
[idx
].FpControlStatus
.FpStatus
>> 11) & 0x7;
480 env
->fpus
= vcxt
.values
[idx
].FpControlStatus
.FpStatus
& ~0x3800;
481 for (i
= 0; i
< 8; ++i
) {
482 env
->fptags
[i
] = !((vcxt
.values
[idx
].FpControlStatus
.FpTag
>> i
) & 1);
484 env
->fpop
= vcxt
.values
[idx
].FpControlStatus
.LastFpOp
;
485 env
->fpip
= vcxt
.values
[idx
].FpControlStatus
.LastFpRip
;
488 /* XMM control status register */
489 assert(whpx_register_names
[idx
] == WHvX64RegisterXmmControlStatus
);
490 env
->mxcsr
= vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControl
;
494 assert(whpx_register_names
[idx
] == WHvX64RegisterTsc
);
495 env
->tsc
= vcxt
.values
[idx
++].Reg64
;
496 assert(whpx_register_names
[idx
] == WHvX64RegisterEfer
);
497 env
->efer
= vcxt
.values
[idx
++].Reg64
;
499 assert(whpx_register_names
[idx
] == WHvX64RegisterKernelGsBase
);
500 env
->kernelgsbase
= vcxt
.values
[idx
++].Reg64
;
503 assert(whpx_register_names
[idx
] == WHvX64RegisterApicBase
);
504 apic_base
= vcxt
.values
[idx
++].Reg64
;
505 if (apic_base
!= vcpu
->apic_base
) {
506 vcpu
->apic_base
= apic_base
;
507 cpu_set_apic_base(x86_cpu
->apic_state
, vcpu
->apic_base
);
510 /* WHvX64RegisterPat - Skipped */
512 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterCs
);
513 env
->sysenter_cs
= vcxt
.values
[idx
++].Reg64
;;
514 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEip
);
515 env
->sysenter_eip
= vcxt
.values
[idx
++].Reg64
;
516 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEsp
);
517 env
->sysenter_esp
= vcxt
.values
[idx
++].Reg64
;
518 assert(whpx_register_names
[idx
] == WHvX64RegisterStar
);
519 env
->star
= vcxt
.values
[idx
++].Reg64
;
521 assert(whpx_register_names
[idx
] == WHvX64RegisterLstar
);
522 env
->lstar
= vcxt
.values
[idx
++].Reg64
;
523 assert(whpx_register_names
[idx
] == WHvX64RegisterCstar
);
524 env
->cstar
= vcxt
.values
[idx
++].Reg64
;
525 assert(whpx_register_names
[idx
] == WHvX64RegisterSfmask
);
526 env
->fmask
= vcxt
.values
[idx
++].Reg64
;
529 /* Interrupt / Event Registers - Skipped */
531 assert(idx
== RTL_NUMBER_OF(whpx_register_names
));
536 static HRESULT CALLBACK
whpx_emu_ioport_callback(
538 WHV_EMULATOR_IO_ACCESS_INFO
*IoAccess
)
540 MemTxAttrs attrs
= { 0 };
541 address_space_rw(&address_space_io
, IoAccess
->Port
, attrs
,
542 (uint8_t *)&IoAccess
->Data
, IoAccess
->AccessSize
,
543 IoAccess
->Direction
);
547 static HRESULT CALLBACK
whpx_emu_mmio_callback(
549 WHV_EMULATOR_MEMORY_ACCESS_INFO
*ma
)
551 cpu_physical_memory_rw(ma
->GpaAddress
, ma
->Data
, ma
->AccessSize
,
556 static HRESULT CALLBACK
whpx_emu_getreg_callback(
558 const WHV_REGISTER_NAME
*RegisterNames
,
559 UINT32 RegisterCount
,
560 WHV_REGISTER_VALUE
*RegisterValues
)
563 struct whpx_state
*whpx
= &whpx_global
;
564 CPUState
*cpu
= (CPUState
*)ctx
;
566 hr
= whp_dispatch
.WHvGetVirtualProcessorRegisters(
567 whpx
->partition
, cpu
->cpu_index
,
568 RegisterNames
, RegisterCount
,
571 error_report("WHPX: Failed to get virtual processor registers,"
578 static HRESULT CALLBACK
whpx_emu_setreg_callback(
580 const WHV_REGISTER_NAME
*RegisterNames
,
581 UINT32 RegisterCount
,
582 const WHV_REGISTER_VALUE
*RegisterValues
)
585 struct whpx_state
*whpx
= &whpx_global
;
586 CPUState
*cpu
= (CPUState
*)ctx
;
588 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
589 whpx
->partition
, cpu
->cpu_index
,
590 RegisterNames
, RegisterCount
,
593 error_report("WHPX: Failed to set virtual processor registers,"
598 * The emulator just successfully wrote the register state. We clear the
599 * dirty state so we avoid the double write on resume of the VP.
601 cpu
->vcpu_dirty
= false;
606 static HRESULT CALLBACK
whpx_emu_translate_callback(
608 WHV_GUEST_VIRTUAL_ADDRESS Gva
,
609 WHV_TRANSLATE_GVA_FLAGS TranslateFlags
,
610 WHV_TRANSLATE_GVA_RESULT_CODE
*TranslationResult
,
611 WHV_GUEST_PHYSICAL_ADDRESS
*Gpa
)
614 struct whpx_state
*whpx
= &whpx_global
;
615 CPUState
*cpu
= (CPUState
*)ctx
;
616 WHV_TRANSLATE_GVA_RESULT res
;
618 hr
= whp_dispatch
.WHvTranslateGva(whpx
->partition
, cpu
->cpu_index
,
619 Gva
, TranslateFlags
, &res
, Gpa
);
621 error_report("WHPX: Failed to translate GVA, hr=%08lx", hr
);
623 *TranslationResult
= res
.ResultCode
;
629 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks
= {
630 .Size
= sizeof(WHV_EMULATOR_CALLBACKS
),
631 .WHvEmulatorIoPortCallback
= whpx_emu_ioport_callback
,
632 .WHvEmulatorMemoryCallback
= whpx_emu_mmio_callback
,
633 .WHvEmulatorGetVirtualProcessorRegisters
= whpx_emu_getreg_callback
,
634 .WHvEmulatorSetVirtualProcessorRegisters
= whpx_emu_setreg_callback
,
635 .WHvEmulatorTranslateGvaPage
= whpx_emu_translate_callback
,
638 static int whpx_handle_mmio(CPUState
*cpu
, WHV_MEMORY_ACCESS_CONTEXT
*ctx
)
641 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
642 WHV_EMULATOR_STATUS emu_status
;
644 hr
= whp_dispatch
.WHvEmulatorTryMmioEmulation(
646 &vcpu
->exit_ctx
.VpContext
, ctx
,
649 error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr
);
653 if (!emu_status
.EmulationSuccessful
) {
654 error_report("WHPX: Failed to emulate MMIO access with"
655 " EmulatorReturnStatus: %u", emu_status
.AsUINT32
);
662 static int whpx_handle_portio(CPUState
*cpu
,
663 WHV_X64_IO_PORT_ACCESS_CONTEXT
*ctx
)
666 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
667 WHV_EMULATOR_STATUS emu_status
;
669 hr
= whp_dispatch
.WHvEmulatorTryIoEmulation(
671 &vcpu
->exit_ctx
.VpContext
, ctx
,
674 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr
);
678 if (!emu_status
.EmulationSuccessful
) {
679 error_report("WHPX: Failed to emulate PortIO access with"
680 " EmulatorReturnStatus: %u", emu_status
.AsUINT32
);
687 static int whpx_handle_halt(CPUState
*cpu
)
689 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
692 qemu_mutex_lock_iothread();
693 if (!((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
694 (env
->eflags
& IF_MASK
)) &&
695 !(cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
696 cpu
->exception_index
= EXCP_HLT
;
700 qemu_mutex_unlock_iothread();
705 static void whpx_vcpu_pre_run(CPUState
*cpu
)
708 struct whpx_state
*whpx
= &whpx_global
;
709 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
710 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
711 X86CPU
*x86_cpu
= X86_CPU(cpu
);
714 WHV_X64_PENDING_INTERRUPTION_REGISTER new_int
;
715 UINT32 reg_count
= 0;
716 WHV_REGISTER_VALUE reg_values
[3];
717 WHV_REGISTER_NAME reg_names
[3];
719 memset(&new_int
, 0, sizeof(new_int
));
720 memset(reg_values
, 0, sizeof(reg_values
));
722 qemu_mutex_lock_iothread();
725 if (!vcpu
->interruption_pending
&&
726 cpu
->interrupt_request
& (CPU_INTERRUPT_NMI
| CPU_INTERRUPT_SMI
)) {
727 if (cpu
->interrupt_request
& CPU_INTERRUPT_NMI
) {
728 cpu
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
729 vcpu
->interruptable
= false;
730 new_int
.InterruptionType
= WHvX64PendingNmi
;
731 new_int
.InterruptionPending
= 1;
732 new_int
.InterruptionVector
= 2;
734 if (cpu
->interrupt_request
& CPU_INTERRUPT_SMI
) {
735 cpu
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
740 * Force the VCPU out of its inner loop to process any INIT requests or
741 * commit pending TPR access.
743 if (cpu
->interrupt_request
& (CPU_INTERRUPT_INIT
| CPU_INTERRUPT_TPR
)) {
744 if ((cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) &&
745 !(env
->hflags
& HF_SMM_MASK
)) {
746 cpu
->exit_request
= 1;
748 if (cpu
->interrupt_request
& CPU_INTERRUPT_TPR
) {
749 cpu
->exit_request
= 1;
753 /* Get pending hard interruption or replay one that was overwritten */
754 if (!vcpu
->interruption_pending
&&
755 vcpu
->interruptable
&& (env
->eflags
& IF_MASK
)) {
756 assert(!new_int
.InterruptionPending
);
757 if (cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
758 cpu
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
759 irq
= cpu_get_pic_interrupt(env
);
761 new_int
.InterruptionType
= WHvX64PendingInterrupt
;
762 new_int
.InterruptionPending
= 1;
763 new_int
.InterruptionVector
= irq
;
768 /* Setup interrupt state if new one was prepared */
769 if (new_int
.InterruptionPending
) {
770 reg_values
[reg_count
].PendingInterruption
= new_int
;
771 reg_names
[reg_count
] = WHvRegisterPendingInterruption
;
775 /* Sync the TPR to the CR8 if was modified during the intercept */
776 tpr
= cpu_get_apic_tpr(x86_cpu
->apic_state
);
777 if (tpr
!= vcpu
->tpr
) {
779 reg_values
[reg_count
].Reg64
= tpr
;
780 cpu
->exit_request
= 1;
781 reg_names
[reg_count
] = WHvX64RegisterCr8
;
785 /* Update the state of the interrupt delivery notification */
786 if (!vcpu
->window_registered
&&
787 cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
788 reg_values
[reg_count
].DeliverabilityNotifications
.InterruptNotification
790 vcpu
->window_registered
= 1;
791 reg_names
[reg_count
] = WHvX64RegisterDeliverabilityNotifications
;
795 qemu_mutex_unlock_iothread();
798 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
799 whpx
->partition
, cpu
->cpu_index
,
800 reg_names
, reg_count
, reg_values
);
802 error_report("WHPX: Failed to set interrupt state registers,"
810 static void whpx_vcpu_post_run(CPUState
*cpu
)
812 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
813 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
814 X86CPU
*x86_cpu
= X86_CPU(cpu
);
816 env
->eflags
= vcpu
->exit_ctx
.VpContext
.Rflags
;
818 uint64_t tpr
= vcpu
->exit_ctx
.VpContext
.Cr8
;
819 if (vcpu
->tpr
!= tpr
) {
821 qemu_mutex_lock_iothread();
822 cpu_set_apic_tpr(x86_cpu
->apic_state
, vcpu
->tpr
);
823 qemu_mutex_unlock_iothread();
826 vcpu
->interruption_pending
=
827 vcpu
->exit_ctx
.VpContext
.ExecutionState
.InterruptionPending
;
829 vcpu
->interruptable
=
830 !vcpu
->exit_ctx
.VpContext
.ExecutionState
.InterruptShadow
;
835 static void whpx_vcpu_process_async_events(CPUState
*cpu
)
837 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
838 X86CPU
*x86_cpu
= X86_CPU(cpu
);
839 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
841 if ((cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) &&
842 !(env
->hflags
& HF_SMM_MASK
)) {
844 do_cpu_init(x86_cpu
);
845 cpu
->vcpu_dirty
= true;
846 vcpu
->interruptable
= true;
849 if (cpu
->interrupt_request
& CPU_INTERRUPT_POLL
) {
850 cpu
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
851 apic_poll_irq(x86_cpu
->apic_state
);
854 if (((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
855 (env
->eflags
& IF_MASK
)) ||
856 (cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
860 if (cpu
->interrupt_request
& CPU_INTERRUPT_SIPI
) {
861 if (!cpu
->vcpu_dirty
) {
862 whpx_get_registers(cpu
);
864 do_cpu_sipi(x86_cpu
);
867 if (cpu
->interrupt_request
& CPU_INTERRUPT_TPR
) {
868 cpu
->interrupt_request
&= ~CPU_INTERRUPT_TPR
;
869 if (!cpu
->vcpu_dirty
) {
870 whpx_get_registers(cpu
);
872 apic_handle_tpr_access_report(x86_cpu
->apic_state
, env
->eip
,
873 env
->tpr_access_type
);
879 static int whpx_vcpu_run(CPUState
*cpu
)
882 struct whpx_state
*whpx
= &whpx_global
;
883 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
886 whpx_vcpu_process_async_events(cpu
);
888 cpu
->exception_index
= EXCP_HLT
;
889 atomic_set(&cpu
->exit_request
, false);
893 qemu_mutex_unlock_iothread();
897 if (cpu
->vcpu_dirty
) {
898 whpx_set_registers(cpu
);
899 cpu
->vcpu_dirty
= false;
902 whpx_vcpu_pre_run(cpu
);
904 if (atomic_read(&cpu
->exit_request
)) {
908 hr
= whp_dispatch
.WHvRunVirtualProcessor(
909 whpx
->partition
, cpu
->cpu_index
,
910 &vcpu
->exit_ctx
, sizeof(vcpu
->exit_ctx
));
913 error_report("WHPX: Failed to exec a virtual processor,"
919 whpx_vcpu_post_run(cpu
);
921 switch (vcpu
->exit_ctx
.ExitReason
) {
922 case WHvRunVpExitReasonMemoryAccess
:
923 ret
= whpx_handle_mmio(cpu
, &vcpu
->exit_ctx
.MemoryAccess
);
926 case WHvRunVpExitReasonX64IoPortAccess
:
927 ret
= whpx_handle_portio(cpu
, &vcpu
->exit_ctx
.IoPortAccess
);
930 case WHvRunVpExitReasonX64InterruptWindow
:
931 vcpu
->window_registered
= 0;
935 case WHvRunVpExitReasonX64Halt
:
936 ret
= whpx_handle_halt(cpu
);
939 case WHvRunVpExitReasonCanceled
:
940 cpu
->exception_index
= EXCP_INTERRUPT
;
944 case WHvRunVpExitReasonX64MsrAccess
: {
945 WHV_REGISTER_VALUE reg_values
[3] = {0};
946 WHV_REGISTER_NAME reg_names
[3];
949 reg_names
[0] = WHvX64RegisterRip
;
950 reg_names
[1] = WHvX64RegisterRax
;
951 reg_names
[2] = WHvX64RegisterRdx
;
953 reg_values
[0].Reg64
=
954 vcpu
->exit_ctx
.VpContext
.Rip
+
955 vcpu
->exit_ctx
.VpContext
.InstructionLength
;
958 * For all unsupported MSR access we:
962 reg_count
= vcpu
->exit_ctx
.MsrAccess
.AccessInfo
.IsWrite
?
965 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
968 reg_names
, reg_count
,
972 error_report("WHPX: Failed to set MsrAccess state "
973 " registers, hr=%08lx", hr
);
978 case WHvRunVpExitReasonX64Cpuid
: {
979 WHV_REGISTER_VALUE reg_values
[5];
980 WHV_REGISTER_NAME reg_names
[5];
981 UINT32 reg_count
= 5;
982 UINT64 rip
, rax
, rcx
, rdx
, rbx
;
984 memset(reg_values
, 0, sizeof(reg_values
));
986 rip
= vcpu
->exit_ctx
.VpContext
.Rip
+
987 vcpu
->exit_ctx
.VpContext
.InstructionLength
;
988 switch (vcpu
->exit_ctx
.CpuidAccess
.Rax
) {
990 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
991 /* Advertise that we are running on a hypervisor */
993 vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
|
994 CPUID_EXT_HYPERVISOR
;
996 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
997 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1000 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
1001 /* Remove any support of OSVW */
1003 vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
&
1006 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1007 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1010 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
1011 rcx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
;
1012 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1013 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1016 reg_names
[0] = WHvX64RegisterRip
;
1017 reg_names
[1] = WHvX64RegisterRax
;
1018 reg_names
[2] = WHvX64RegisterRcx
;
1019 reg_names
[3] = WHvX64RegisterRdx
;
1020 reg_names
[4] = WHvX64RegisterRbx
;
1022 reg_values
[0].Reg64
= rip
;
1023 reg_values
[1].Reg64
= rax
;
1024 reg_values
[2].Reg64
= rcx
;
1025 reg_values
[3].Reg64
= rdx
;
1026 reg_values
[4].Reg64
= rbx
;
1028 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
1029 whpx
->partition
, cpu
->cpu_index
,
1035 error_report("WHPX: Failed to set CpuidAccess state registers,"
1041 case WHvRunVpExitReasonNone
:
1042 case WHvRunVpExitReasonUnrecoverableException
:
1043 case WHvRunVpExitReasonInvalidVpRegisterValue
:
1044 case WHvRunVpExitReasonUnsupportedFeature
:
1045 case WHvRunVpExitReasonException
:
1047 error_report("WHPX: Unexpected VP exit code %d",
1048 vcpu
->exit_ctx
.ExitReason
);
1049 whpx_get_registers(cpu
);
1050 qemu_mutex_lock_iothread();
1051 qemu_system_guest_panicked(cpu_get_crash_info(cpu
));
1052 qemu_mutex_unlock_iothread();
1059 qemu_mutex_lock_iothread();
1062 atomic_set(&cpu
->exit_request
, false);
1067 static void do_whpx_cpu_synchronize_state(CPUState
*cpu
, run_on_cpu_data arg
)
1069 whpx_get_registers(cpu
);
1070 cpu
->vcpu_dirty
= true;
1073 static void do_whpx_cpu_synchronize_post_reset(CPUState
*cpu
,
1074 run_on_cpu_data arg
)
1076 whpx_set_registers(cpu
);
1077 cpu
->vcpu_dirty
= false;
1080 static void do_whpx_cpu_synchronize_post_init(CPUState
*cpu
,
1081 run_on_cpu_data arg
)
1083 whpx_set_registers(cpu
);
1084 cpu
->vcpu_dirty
= false;
1087 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState
*cpu
,
1088 run_on_cpu_data arg
)
1090 cpu
->vcpu_dirty
= true;
1097 void whpx_cpu_synchronize_state(CPUState
*cpu
)
1099 if (!cpu
->vcpu_dirty
) {
1100 run_on_cpu(cpu
, do_whpx_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
1104 void whpx_cpu_synchronize_post_reset(CPUState
*cpu
)
1106 run_on_cpu(cpu
, do_whpx_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
1109 void whpx_cpu_synchronize_post_init(CPUState
*cpu
)
1111 run_on_cpu(cpu
, do_whpx_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
1114 void whpx_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
1116 run_on_cpu(cpu
, do_whpx_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
1123 static Error
*whpx_migration_blocker
;
1125 int whpx_init_vcpu(CPUState
*cpu
)
1128 struct whpx_state
*whpx
= &whpx_global
;
1129 struct whpx_vcpu
*vcpu
;
1130 Error
*local_error
= NULL
;
1132 /* Add migration blockers for all unsupported features of the
1133 * Windows Hypervisor Platform
1135 if (whpx_migration_blocker
== NULL
) {
1136 error_setg(&whpx_migration_blocker
,
1137 "State blocked due to non-migratable CPUID feature support,"
1138 "dirty memory tracking support, and XSAVE/XRSTOR support");
1140 (void)migrate_add_blocker(whpx_migration_blocker
, &local_error
);
1142 error_report_err(local_error
);
1143 migrate_del_blocker(whpx_migration_blocker
);
1144 error_free(whpx_migration_blocker
);
1149 vcpu
= g_malloc0(sizeof(struct whpx_vcpu
));
1152 error_report("WHPX: Failed to allocte VCPU context.");
1156 hr
= whp_dispatch
.WHvEmulatorCreateEmulator(
1157 &whpx_emu_callbacks
,
1160 error_report("WHPX: Failed to setup instruction completion support,"
1166 hr
= whp_dispatch
.WHvCreateVirtualProcessor(
1167 whpx
->partition
, cpu
->cpu_index
, 0);
1169 error_report("WHPX: Failed to create a virtual processor,"
1171 whp_dispatch
.WHvEmulatorDestroyEmulator(vcpu
->emulator
);
1176 vcpu
->interruptable
= true;
1178 cpu
->vcpu_dirty
= true;
1179 cpu
->hax_vcpu
= (struct hax_vcpu_state
*)vcpu
;
1184 int whpx_vcpu_exec(CPUState
*cpu
)
1190 if (cpu
->exception_index
>= EXCP_INTERRUPT
) {
1191 ret
= cpu
->exception_index
;
1192 cpu
->exception_index
= -1;
1196 fatal
= whpx_vcpu_run(cpu
);
1199 error_report("WHPX: Failed to exec a virtual processor");
1207 void whpx_destroy_vcpu(CPUState
*cpu
)
1209 struct whpx_state
*whpx
= &whpx_global
;
1210 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
1212 whp_dispatch
.WHvDeleteVirtualProcessor(whpx
->partition
, cpu
->cpu_index
);
1213 whp_dispatch
.WHvEmulatorDestroyEmulator(vcpu
->emulator
);
1214 g_free(cpu
->hax_vcpu
);
1218 void whpx_vcpu_kick(CPUState
*cpu
)
1220 struct whpx_state
*whpx
= &whpx_global
;
1221 whp_dispatch
.WHvCancelRunVirtualProcessor(
1222 whpx
->partition
, cpu
->cpu_index
, 0);
1229 static void whpx_update_mapping(hwaddr start_pa
, ram_addr_t size
,
1230 void *host_va
, int add
, int rom
,
1233 struct whpx_state
*whpx
= &whpx_global
;
1238 printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1239 (void*)start_pa, (void*)size, host_va,
1240 (rom ? "ROM" : "RAM"), name);
1242 printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
1243 (void*)start_pa, (void*)size, host_va, name);
1248 hr
= whp_dispatch
.WHvMapGpaRange(whpx
->partition
,
1252 (WHvMapGpaRangeFlagRead
|
1253 WHvMapGpaRangeFlagExecute
|
1254 (rom
? 0 : WHvMapGpaRangeFlagWrite
)));
1256 hr
= whp_dispatch
.WHvUnmapGpaRange(whpx
->partition
,
1262 error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1263 " Host:%p, hr=%08lx",
1264 (add
? "MAP" : "UNMAP"), name
,
1265 (void *)(uintptr_t)start_pa
, (void *)size
, host_va
, hr
);
1269 static void whpx_process_section(MemoryRegionSection
*section
, int add
)
1271 MemoryRegion
*mr
= section
->mr
;
1272 hwaddr start_pa
= section
->offset_within_address_space
;
1273 ram_addr_t size
= int128_get64(section
->size
);
1277 if (!memory_region_is_ram(mr
)) {
1281 delta
= qemu_real_host_page_size
- (start_pa
& ~qemu_real_host_page_mask
);
1282 delta
&= ~qemu_real_host_page_mask
;
1288 size
&= qemu_real_host_page_mask
;
1289 if (!size
|| (start_pa
& ~qemu_real_host_page_mask
)) {
1293 host_va
= (uintptr_t)memory_region_get_ram_ptr(mr
)
1294 + section
->offset_within_region
+ delta
;
1296 whpx_update_mapping(start_pa
, size
, (void *)(uintptr_t)host_va
, add
,
1297 memory_region_is_rom(mr
), mr
->name
);
1300 static void whpx_region_add(MemoryListener
*listener
,
1301 MemoryRegionSection
*section
)
1303 memory_region_ref(section
->mr
);
1304 whpx_process_section(section
, 1);
1307 static void whpx_region_del(MemoryListener
*listener
,
1308 MemoryRegionSection
*section
)
1310 whpx_process_section(section
, 0);
1311 memory_region_unref(section
->mr
);
1314 static void whpx_transaction_begin(MemoryListener
*listener
)
1318 static void whpx_transaction_commit(MemoryListener
*listener
)
/*
 * MemoryListener log_sync hook.  WHPX does not expose a dirty-page bitmap
 * here, so conservatively mark the whole RAM section dirty; non-RAM
 * regions are skipped.
 */
static void whpx_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}
/*
 * Listener that mirrors QEMU's system address space into the WHPX
 * partition: RAM sections are mapped on add and unmapped on delete.
 */
static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,
};
/* Hook the WHPX listener into the system memory address space. */
static void whpx_memory_init(void)
{
    memory_listener_register(&whpx_memory_listener, &address_space_memory);
}
/*
 * Installed as cpu_interrupt_handler: record the requested interrupt bits
 * on the vCPU and, if the request comes from another thread, kick the
 * vCPU so it leaves the hypervisor and notices them.
 */
static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
1361 static int whpx_accel_init(MachineState
*ms
)
1363 struct whpx_state
*whpx
;
1366 WHV_CAPABILITY whpx_cap
;
1367 UINT32 whpx_cap_size
;
1368 WHV_PARTITION_PROPERTY prop
;
1370 whpx
= &whpx_global
;
1372 if (!init_whp_dispatch()) {
1377 memset(whpx
, 0, sizeof(struct whpx_state
));
1378 whpx
->mem_quota
= ms
->ram_size
;
1380 hr
= whp_dispatch
.WHvGetCapability(
1381 WHvCapabilityCodeHypervisorPresent
, &whpx_cap
,
1382 sizeof(whpx_cap
), &whpx_cap_size
);
1383 if (FAILED(hr
) || !whpx_cap
.HypervisorPresent
) {
1384 error_report("WHPX: No accelerator found, hr=%08lx", hr
);
1389 hr
= whp_dispatch
.WHvCreatePartition(&whpx
->partition
);
1391 error_report("WHPX: Failed to create partition, hr=%08lx", hr
);
1396 memset(&prop
, 0, sizeof(WHV_PARTITION_PROPERTY
));
1397 prop
.ProcessorCount
= ms
->smp
.cpus
;
1398 hr
= whp_dispatch
.WHvSetPartitionProperty(
1400 WHvPartitionPropertyCodeProcessorCount
,
1402 sizeof(WHV_PARTITION_PROPERTY
));
1405 error_report("WHPX: Failed to set partition core count to %d,"
1406 " hr=%08lx", ms
->smp
.cores
, hr
);
1411 memset(&prop
, 0, sizeof(WHV_PARTITION_PROPERTY
));
1412 prop
.ExtendedVmExits
.X64MsrExit
= 1;
1413 prop
.ExtendedVmExits
.X64CpuidExit
= 1;
1414 hr
= whp_dispatch
.WHvSetPartitionProperty(
1416 WHvPartitionPropertyCodeExtendedVmExits
,
1418 sizeof(WHV_PARTITION_PROPERTY
));
1421 error_report("WHPX: Failed to enable partition extended X64MsrExit and"
1422 " X64CpuidExit hr=%08lx", hr
);
1427 UINT32 cpuidExitList
[] = {1, 0x80000001};
1428 hr
= whp_dispatch
.WHvSetPartitionProperty(
1430 WHvPartitionPropertyCodeCpuidExitList
,
1432 RTL_NUMBER_OF(cpuidExitList
) * sizeof(UINT32
));
1435 error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
1441 hr
= whp_dispatch
.WHvSetupPartition(whpx
->partition
);
1443 error_report("WHPX: Failed to setup partition, hr=%08lx", hr
);
1450 cpu_interrupt_handler
= whpx_handle_interrupt
;
1452 printf("Windows Hypervisor Platform accelerator is operational\n");
1457 if (NULL
!= whpx
->partition
) {
1458 whp_dispatch
.WHvDeletePartition(whpx
->partition
);
1459 whpx
->partition
= NULL
;
/* Query whether the WHPX accelerator was selected and initialized. */
int whpx_enabled(void)
{
    return whpx_allowed;
}
/*
 * QOM class initializer for the WHPX accelerator: wire up the machine
 * init hook and the global "allowed" flag consulted by whpx_enabled().
 */
static void whpx_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "WHPX";
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;
}
/* QOM type descriptor registering "whpx" as an accelerator class. */
static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
};
/* Register the WHPX accelerator type with QOM. */
static void whpx_type_init(void)
{
    type_register_static(&whpx_accel_type);
}
/*
 * Lazily load WinHvPlatform.dll and WinHvEmulation.dll and resolve every
 * WHv* entry point into the whp_dispatch function table.  Idempotent:
 * returns immediately once initialized.  On any missing library or
 * symbol, both libraries are released and false is returned.
 */
bool init_whp_dispatch(void)
{
    const char *lib_name;
    HMODULE hLib;

    if (whp_dispatch_initialized) {
        return true;
    }

    /* Resolve one function from hLib into whp_dispatch; jump to the
     * cleanup path if the symbol is absent. */
    #define WHP_LOAD_FIELD(return_type, function_name, signature) \
        whp_dispatch.function_name = \
            (function_name ## _t)GetProcAddress(hLib, #function_name); \
        if (!whp_dispatch.function_name) { \
            error_report("Could not load function %s from library %s.", \
                         #function_name, lib_name); \
            goto error; \
        } \

    lib_name = "WinHvPlatform.dll";
    hWinHvPlatform = LoadLibrary(lib_name);
    if (!hWinHvPlatform) {
        error_report("Could not load library %s.", lib_name);
        goto error;
    }
    hLib = hWinHvPlatform;
    LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)

    lib_name = "WinHvEmulation.dll";
    hWinHvEmulation = LoadLibrary(lib_name);
    if (!hWinHvEmulation) {
        error_report("Could not load library %s.", lib_name);
        goto error;
    }
    hLib = hWinHvEmulation;
    LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)

    whp_dispatch_initialized = true;

    return true;

error:

    if (hWinHvPlatform) {
        FreeLibrary(hWinHvPlatform);
    }
    if (hWinHvEmulation) {
        FreeLibrary(hWinHvEmulation);
    }
    return false;
}
/* Run whpx_type_init() at QEMU type-registration time. */
type_init(whpx_type_init);