/*
 * QEMU Windows Hypervisor Platform accelerator (WHPX)
 *
 * Copyright Microsoft Corp. 2017
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/ioport.h"
#include "qemu-common.h"
#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/whpx.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qapi/error.h"
#include "migration/blocker.h"

#include <WinHvPlatform.h>
#include <WinHvEmulation.h>

struct whpx_state {
    uint64_t mem_quota;
    WHV_PARTITION_HANDLE partition;
};

static const WHV_REGISTER_NAME whpx_register_names[] = {

    /* X64 General purpose registers */
    WHvX64RegisterRax,
    WHvX64RegisterRcx,
    WHvX64RegisterRdx,
    WHvX64RegisterRbx,
    WHvX64RegisterRsp,
    WHvX64RegisterRbp,
    WHvX64RegisterRsi,
    WHvX64RegisterRdi,
    WHvX64RegisterR8,
    WHvX64RegisterR9,
    WHvX64RegisterR10,
    WHvX64RegisterR11,
    WHvX64RegisterR12,
    WHvX64RegisterR13,
    WHvX64RegisterR14,
    WHvX64RegisterR15,
    WHvX64RegisterRip,
    WHvX64RegisterRflags,

    /* X64 Segment registers */
    WHvX64RegisterEs,
    WHvX64RegisterCs,
    WHvX64RegisterSs,
    WHvX64RegisterDs,
    WHvX64RegisterFs,
    WHvX64RegisterGs,
    WHvX64RegisterLdtr,
    WHvX64RegisterTr,

    /* X64 Table registers */
    WHvX64RegisterIdtr,
    WHvX64RegisterGdtr,

    /* X64 Control Registers */
    WHvX64RegisterCr0,
    WHvX64RegisterCr2,
    WHvX64RegisterCr3,
    WHvX64RegisterCr4,
    WHvX64RegisterCr8,

    /* X64 Debug Registers */
    /*
     * WHvX64RegisterDr0,
     * WHvX64RegisterDr1,
     * WHvX64RegisterDr2,
     * WHvX64RegisterDr3,
     * WHvX64RegisterDr6,
     * WHvX64RegisterDr7,
     */

    /* X64 Floating Point and Vector Registers */
    WHvX64RegisterXmm0,
    WHvX64RegisterXmm1,
    WHvX64RegisterXmm2,
    WHvX64RegisterXmm3,
    WHvX64RegisterXmm4,
    WHvX64RegisterXmm5,
    WHvX64RegisterXmm6,
    WHvX64RegisterXmm7,
    WHvX64RegisterXmm8,
    WHvX64RegisterXmm9,
    WHvX64RegisterXmm10,
    WHvX64RegisterXmm11,
    WHvX64RegisterXmm12,
    WHvX64RegisterXmm13,
    WHvX64RegisterXmm14,
    WHvX64RegisterXmm15,
    WHvX64RegisterFpMmx0,
    WHvX64RegisterFpMmx1,
    WHvX64RegisterFpMmx2,
    WHvX64RegisterFpMmx3,
    WHvX64RegisterFpMmx4,
    WHvX64RegisterFpMmx5,
    WHvX64RegisterFpMmx6,
    WHvX64RegisterFpMmx7,
    WHvX64RegisterFpControlStatus,
    WHvX64RegisterXmmControlStatus,

    /* X64 MSRs */
    WHvX64RegisterTsc,
    WHvX64RegisterEfer,
#ifdef TARGET_X86_64
    WHvX64RegisterKernelGsBase,
#endif
    WHvX64RegisterApicBase,
    /* WHvX64RegisterPat, */
    WHvX64RegisterSysenterCs,
    WHvX64RegisterSysenterEip,
    WHvX64RegisterSysenterEsp,
    WHvX64RegisterStar,
#ifdef TARGET_X86_64
    WHvX64RegisterLstar,
    WHvX64RegisterCstar,
    WHvX64RegisterSfmask,
#endif

    /* Interrupt / Event Registers */
    /*
     * WHvRegisterPendingInterruption,
     * WHvRegisterInterruptState,
     * WHvRegisterPendingEvent0,
     * WHvRegisterPendingEvent1
     * WHvX64RegisterDeliverabilityNotifications,
     */
};

struct whpx_register_set {
    WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
};

struct whpx_vcpu {
    WHV_EMULATOR_HANDLE emulator;
    bool window_registered;
    bool interruptable;
    uint64_t tpr;
    uint64_t apic_base;
    WHV_X64_PENDING_INTERRUPTION_REGISTER interrupt_in_flight;

    /* Must be the last field as it may have a tail */
    WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
};

static bool whpx_allowed;

struct whpx_state whpx_global;


/*
 * VP support
 */

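/*
 * Per-vcpu WHPX state is stashed in the generic CPUState::hax_vcpu pointer
 * (see whpx_init_vcpu() below) rather than adding a dedicated field.
 */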
static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
{
    return (struct whpx_vcpu *)cpu->hax_vcpu;
}

static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
                                             int r86)
{
    WHV_X64_SEGMENT_REGISTER hs;
    unsigned flags = qs->flags;

    hs.Base = qs->base;
    hs.Limit = qs->limit;
    hs.Selector = qs->selector;

    if (v86) {
        hs.Attributes = 0;
        hs.SegmentType = 3;
        hs.Present = 1;
        hs.DescriptorPrivilegeLevel = 3;
        hs.NonSystemSegment = 1;

    } else {
        hs.Attributes = (flags >> DESC_TYPE_SHIFT);

        if (r86) {
            /* hs.Base &= 0xfffff; */
        }
    }

    return hs;
}

static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
{
    SegmentCache qs;

    qs.base = hs->Base;
    qs.limit = hs->Limit;
    qs.selector = hs->Selector;

    qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;

    return qs;
}

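/*
 * whpx_set_registers() and whpx_get_registers() walk vcxt.values[] in lock
 * step with whpx_register_names[] above; the assert()s on the register names
 * catch any drift if that table is ever reordered or extended.
 */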
static void whpx_set_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt = {0};
    HRESULT hr;
    int idx = 0;
    int i;
    int v86, r86;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    v86 = (env->eflags & VM_MASK);
    r86 = !(env->cr[0] & CR0_PE_MASK);

    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
        vcxt.values[idx].Reg64 = env->regs[idx];
    }

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    vcxt.values[idx++].Reg64 = env->eip;

    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    vcxt.values[idx++].Reg64 = env->eflags;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
    }

    assert(idx == WHvX64RegisterLdtr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);

    assert(idx == WHvX64RegisterTr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);

    assert(idx == WHvX64RegisterIdtr);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;
    idx += 1;

    assert(idx == WHvX64RegisterGdtr);
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    vcxt.values[idx++].Reg64 = env->cr[0];
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    vcxt.values[idx++].Reg64 = env->cr[2];
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    vcxt.values[idx++].Reg64 = env->cr[3];
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    vcxt.values[idx++].Reg64 = env->cr[4];
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    vcxt.values[idx++].Reg64 = vcpu->tpr;

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < 16; i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
    }

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
        /* vcxt.values[idx].Fp.AsUINT128.High64 =
               env->fpregs[i].mmx.MMX_Q(1);
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
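    /*
     * QEMU keeps the x87 top-of-stack pointer in env->fpstt, separate from
     * the rest of the status word; merge it back into bits 11-13 here
     * (whpx_get_registers() performs the reverse split).
     */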
    vcxt.values[idx].FpControlStatus.FpStatus =
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag = 0;
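    /*
     * Build FpTag with a bit set for each valid register; QEMU's fptags[i]
     * is non-zero when ST(i) is empty, hence the inversion.
     */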
    for (i = 0; i < 8; ++i) {
        vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    }
    vcxt.values[idx].FpControlStatus.Reserved = 0;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    vcxt.values[idx++].Reg64 = env->tsc;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    vcxt.values[idx++].Reg64 = env->efer;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    vcxt.values[idx++].Reg64 = env->kernelgsbase;
#endif

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    vcxt.values[idx++].Reg64 = vcpu->apic_base;

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    vcxt.values[idx++].Reg64 = env->star;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    vcxt.values[idx++].Reg64 = env->lstar;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    vcxt.values[idx++].Reg64 = env->cstar;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    vcxt.values[idx++].Reg64 = env->fmask;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         whpx_register_names,
                                         RTL_NUMBER_OF(whpx_register_names),
                                         &vcxt.values[0]);

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
                     hr);
    }

    return;
}

static void whpx_get_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt;
    uint64_t tpr, apic_base;
    HRESULT hr;
    int idx = 0;
    int i;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         whpx_register_names,
                                         RTL_NUMBER_OF(whpx_register_names),
                                         &vcxt.values[0]);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
                     hr);
    }

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
        env->regs[idx] = vcxt.values[idx].Reg64;
    }

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    env->eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    env->eflags = vcxt.values[idx++].Reg64;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
    }

    assert(idx == WHvX64RegisterLdtr);
    env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    assert(idx == WHvX64RegisterTr);
    env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    assert(idx == WHvX64RegisterIdtr);
    env->idt.base = vcxt.values[idx].Table.Base;
    env->idt.limit = vcxt.values[idx].Table.Limit;
    idx += 1;
    assert(idx == WHvX64RegisterGdtr);
    env->gdt.base = vcxt.values[idx].Table.Base;
    env->gdt.limit = vcxt.values[idx].Table.Limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    env->cr[0] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    env->cr[2] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    env->cr[3] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    env->cr[4] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    tpr = vcxt.values[idx++].Reg64;
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
    }

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < 16; i += 1, idx += 1) {
        env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
        env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
    }

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
        /* env->fpregs[i].mmx.MMX_Q(1) =
               vcxt.values[idx].Fp.AsUINT128.High64;
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
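    /* Bits 11-13 of the status word carry the top-of-stack pointer, which
     * QEMU tracks separately in env->fpstt; split it back out here. */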
    env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
    env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
    }
    env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
    env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    env->tsc = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    env->efer = vcxt.values[idx++].Reg64;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    env->kernelgsbase = vcxt.values[idx++].Reg64;
#endif

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    apic_base = vcxt.values[idx++].Reg64;
    if (apic_base != vcpu->apic_base) {
        vcpu->apic_base = apic_base;
        cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
    }

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    env->sysenter_cs = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    env->sysenter_eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    env->sysenter_esp = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    env->star = vcxt.values[idx++].Reg64;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    env->lstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    env->cstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    env->fmask = vcxt.values[idx++].Reg64;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    return;
}

static HRESULT CALLBACK whpx_emu_ioport_callback(
    void *ctx,
    WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
{
    MemTxAttrs attrs = { 0 };
    address_space_rw(&address_space_io, IoAccess->Port, attrs,
                     (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
                     IoAccess->Direction);
    return S_OK;
}

static HRESULT CALLBACK whpx_emu_mmio_callback(
    void *ctx,
    WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
{
    cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
                           ma->Direction);
    return S_OK;
}

static HRESULT CALLBACK whpx_emu_getreg_callback(
    void *ctx,
    const WHV_REGISTER_NAME *RegisterNames,
    UINT32 RegisterCount,
    WHV_REGISTER_VALUE *RegisterValues)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;

    hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         RegisterNames, RegisterCount,
                                         RegisterValues);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get virtual processor registers,"
                     " hr=%08lx", hr);
    }

    return hr;
}

static HRESULT CALLBACK whpx_emu_setreg_callback(
    void *ctx,
    const WHV_REGISTER_NAME *RegisterNames,
    UINT32 RegisterCount,
    const WHV_REGISTER_VALUE *RegisterValues)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;

    hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         RegisterNames, RegisterCount,
                                         RegisterValues);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor registers,"
                     " hr=%08lx", hr);
    }

    /*
     * The emulator just successfully wrote the register state. We clear the
     * dirty state so we avoid the double write on resume of the VP.
     */
    cpu->vcpu_dirty = false;

    return hr;
}

static HRESULT CALLBACK whpx_emu_translate_callback(
    void *ctx,
    WHV_GUEST_VIRTUAL_ADDRESS Gva,
    WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
    WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
    WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;
    WHV_TRANSLATE_GVA_RESULT res;

    hr = WHvTranslateGva(whpx->partition, cpu->cpu_index,
                         Gva, TranslateFlags, &res, Gpa);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
    } else {
        *TranslationResult = res.ResultCode;
    }

    return hr;
}

static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
    .Size = sizeof(WHV_EMULATOR_CALLBACKS),
    .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
    .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
    .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
    .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
    .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
};

static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
{
    HRESULT hr;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = WHvEmulatorTryMmioEmulation(vcpu->emulator, cpu,
                                     &vcpu->exit_ctx.VpContext, ctx,
                                     &emu_status);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
        return -1;
    }

    if (!emu_status.EmulationSuccessful) {
        error_report("WHPX: Failed to emulate MMIO access");
        return -1;
    }

    return 0;
}

static int whpx_handle_portio(CPUState *cpu,
                              WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
{
    HRESULT hr;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = WHvEmulatorTryIoEmulation(vcpu->emulator, cpu,
                                   &vcpu->exit_ctx.VpContext, ctx,
                                   &emu_status);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
        return -1;
    }

    if (!emu_status.EmulationSuccessful) {
        error_report("WHPX: Failed to emulate PortIO access");
        return -1;
    }

    return 0;
}

static int whpx_handle_halt(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    int ret = 0;

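    /*
     * Only park the VP on HLT if no interrupt or NMI is currently
     * deliverable; otherwise the halt is dropped and the run loop continues.
     */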
    qemu_mutex_lock_iothread();
    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->exception_index = EXCP_HLT;
        cpu->halted = true;
        ret = 1;
    }
    qemu_mutex_unlock_iothread();

    return ret;
}

static void whpx_vcpu_pre_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    int irq;
    uint8_t tpr;
    WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0};
    UINT32 reg_count = 0;
    WHV_REGISTER_VALUE reg_values[3] = {0};
    WHV_REGISTER_NAME reg_names[3];

    qemu_mutex_lock_iothread();

    /* Inject NMI */
    if (!vcpu->interrupt_in_flight.InterruptionPending &&
        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            vcpu->interruptable = false;
            new_int.InterruptionType = WHvX64PendingNmi;
            new_int.InterruptionPending = 1;
            new_int.InterruptionVector = 2;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
        }
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests or
     * commit pending TPR access.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    /* Get pending hard interruption or replay one that was overwritten */
    if (!vcpu->interrupt_in_flight.InterruptionPending &&
        vcpu->interruptable && (env->eflags & IF_MASK)) {
        assert(!new_int.InterruptionPending);
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                new_int.InterruptionType = WHvX64PendingInterrupt;
                new_int.InterruptionPending = 1;
                new_int.InterruptionVector = irq;
            }
        }
    }

    /* Setup interrupt state if new one was prepared */
    if (new_int.InterruptionPending) {
        reg_values[reg_count].PendingInterruption = new_int;
        reg_names[reg_count] = WHvRegisterPendingInterruption;
        reg_count += 1;
    }

    /* Sync the TPR to CR8 if it was modified during the intercept */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        reg_values[reg_count].Reg64 = tpr;
        cpu->exit_request = 1;
        reg_names[reg_count] = WHvX64RegisterCr8;
        reg_count += 1;
    }

    /* Update the state of the interrupt delivery notification */
    if (!vcpu->window_registered &&
        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
            = 1;
        vcpu->window_registered = 1;
        reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
        reg_count += 1;
    }

    qemu_mutex_unlock_iothread();

    if (reg_count) {
        hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                             reg_names, reg_count, reg_values);
        if (FAILED(hr)) {
            error_report("WHPX: Failed to set interrupt state registers,"
                         " hr=%08lx", hr);
        }
    }

    return;
}

static void whpx_vcpu_post_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    WHV_REGISTER_VALUE reg_values[4];
    const WHV_REGISTER_NAME reg_names[4] = {
        WHvX64RegisterRflags,
        WHvX64RegisterCr8,
        WHvRegisterPendingInterruption,
        WHvRegisterInterruptState,
    };

    hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         reg_names, 4, reg_values);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get interrupt state registers,"
                     " hr=%08lx", hr);
        vcpu->interruptable = false;
        return;
    }

    assert(reg_names[0] == WHvX64RegisterRflags);
    env->eflags = reg_values[0].Reg64;

    assert(reg_names[1] == WHvX64RegisterCr8);
    if (vcpu->tpr != reg_values[1].Reg64) {
        vcpu->tpr = reg_values[1].Reg64;
        qemu_mutex_lock_iothread();
        cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
        qemu_mutex_unlock_iothread();
    }

    assert(reg_names[2] == WHvRegisterPendingInterruption);
    vcpu->interrupt_in_flight = reg_values[2].PendingInterruption;

    assert(reg_names[3] == WHvRegisterInterruptState);
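    /*
     * An interrupt shadow (e.g. immediately after STI or MOV SS) blocks
     * injection on the next entry; mirror it into vcpu->interruptable so
     * whpx_vcpu_pre_run() holds off pending interrupts.
     */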
    vcpu->interruptable = !reg_values[3].InterruptState.InterruptShadow;

    return;
}

static void whpx_vcpu_process_async_events(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {

        do_cpu_init(x86_cpu);
        cpu->vcpu_dirty = true;
        vcpu->interruptable = true;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        do_cpu_sipi(x86_cpu);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return;
}

static int whpx_vcpu_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    int ret;

    whpx_vcpu_process_async_events(cpu);
    if (cpu->halted) {
        cpu->exception_index = EXCP_HLT;
        atomic_set(&cpu->exit_request, false);
        return 0;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        if (cpu->vcpu_dirty) {
            whpx_set_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        whpx_vcpu_pre_run(cpu);

        if (atomic_read(&cpu->exit_request)) {
            whpx_vcpu_kick(cpu);
        }

        hr = WHvRunVirtualProcessor(whpx->partition, cpu->cpu_index,
                                    &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));

        if (FAILED(hr)) {
            error_report("WHPX: Failed to exec a virtual processor,"
                         " hr=%08lx", hr);
            ret = -1;
            break;
        }

        whpx_vcpu_post_run(cpu);

        switch (vcpu->exit_ctx.ExitReason) {
        case WHvRunVpExitReasonMemoryAccess:
            ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
            break;

        case WHvRunVpExitReasonX64IoPortAccess:
            ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
            break;

        case WHvRunVpExitReasonX64InterruptWindow:
            vcpu->window_registered = 0;
            break;

        case WHvRunVpExitReasonX64Halt:
            ret = whpx_handle_halt(cpu);
            break;

        case WHvRunVpExitReasonCanceled:
            cpu->exception_index = EXCP_INTERRUPT;
            ret = 1;
            break;

        case WHvRunVpExitReasonNone:
        case WHvRunVpExitReasonUnrecoverableException:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonX64MsrAccess:
        case WHvRunVpExitReasonX64Cpuid:
        case WHvRunVpExitReasonException:
        default:
            error_report("WHPX: Unexpected VP exit code %d",
                         vcpu->exit_ctx.ExitReason);
            whpx_get_registers(cpu);
            qemu_mutex_lock_iothread();
            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
            qemu_mutex_unlock_iothread();
            break;
        }

    } while (!ret);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();
    current_cpu = cpu;

    atomic_set(&cpu->exit_request, false);

    return ret < 0;
}

static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    whpx_get_registers(cpu);
    cpu->vcpu_dirty = true;
}

static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

/*
 * CPU support.
 */

void whpx_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

void whpx_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

void whpx_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

/*
 * Vcpu support.
 */

static Error *whpx_migration_blocker;

int whpx_init_vcpu(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu;
    Error *local_error = NULL;

    /* Add migration blockers for all unsupported features of the
     * Windows Hypervisor Platform
     */
    if (whpx_migration_blocker == NULL) {
        error_setg(&whpx_migration_blocker,
1031 "State blocked due to non-migratable CPUID feature support,"
1032 "dirty memory tracking support, and XSAVE/XRSTOR support");

        (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
        if (local_error) {
            error_report_err(local_error);
            error_free(whpx_migration_blocker);
            migrate_del_blocker(whpx_migration_blocker);
            return -EINVAL;
        }
    }

    vcpu = g_malloc0(sizeof(struct whpx_vcpu));

    if (!vcpu) {
        error_report("WHPX: Failed to allocate VCPU context.");
        return -ENOMEM;
    }

    hr = WHvEmulatorCreateEmulator(&whpx_emu_callbacks, &vcpu->emulator);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to setup instruction completion support,"
                     " hr=%08lx", hr);
        g_free(vcpu);
        return -EINVAL;
    }

    hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to create a virtual processor,"
                     " hr=%08lx", hr);
        WHvEmulatorDestroyEmulator(vcpu->emulator);
        g_free(vcpu);
        return -EINVAL;
    }

    vcpu->interruptable = true;

    cpu->vcpu_dirty = true;
    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;

    return 0;
}

int whpx_vcpu_exec(CPUState *cpu)
{
    int ret;
    int fatal;

    for (;;) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            ret = cpu->exception_index;
            cpu->exception_index = -1;
            break;
        }

        fatal = whpx_vcpu_run(cpu);

        if (fatal) {
            error_report("WHPX: Failed to exec a virtual processor");
            abort();
        }
    }

    return ret;
}

void whpx_destroy_vcpu(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
    WHvEmulatorDestroyEmulator(vcpu->emulator);
    g_free(cpu->hax_vcpu);
    return;
}

void whpx_vcpu_kick(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
}

/*
 * Memory support.
 */

static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
                                void *host_va, int add, int rom,
                                const char *name)
{
    struct whpx_state *whpx = &whpx_global;
    HRESULT hr;

    /*
    if (add) {
        printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
               (void*)start_pa, (void*)size, host_va,
               (rom ? "ROM" : "RAM"), name);
    } else {
        printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
               (void*)start_pa, (void*)size, host_va, name);
    }
    */

    if (add) {
        hr = WHvMapGpaRange(whpx->partition,
                            host_va,
                            start_pa,
                            size,
                            (WHvMapGpaRangeFlagRead |
                             WHvMapGpaRangeFlagExecute |
                             (rom ? 0 : WHvMapGpaRangeFlagWrite)));
    } else {
        hr = WHvUnmapGpaRange(whpx->partition,
                              start_pa,
                              size);
    }

    if (FAILED(hr)) {
        error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
                     " Host:%p, hr=%08lx",
                     (add ? "MAP" : "UNMAP"), name,
                     (void *)start_pa, (void *)size, host_va, hr);
    }
}

static void whpx_process_section(MemoryRegionSection *section, int add)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;

    if (!memory_region_is_ram(mr)) {
        return;
    }

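    /*
     * GPA mappings can only be established at host page granularity, so trim
     * the section to page-aligned boundaries and skip anything smaller than
     * a page.
     */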
    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
        return;
    }

    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
              + section->offset_within_region + delta;

    whpx_update_mapping(start_pa, size, (void *)host_va, add,
                        memory_region_is_rom(mr), mr->name);
}

static void whpx_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    whpx_process_section(section, 1);
}

static void whpx_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    whpx_process_section(section, 0);
    memory_region_unref(section->mr);
}

static void whpx_transaction_begin(MemoryListener *listener)
{
}

static void whpx_transaction_commit(MemoryListener *listener)
{
}

static void whpx_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        return;
    }

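    /*
     * WHPX does not expose dirty page tracking here (hence the migration
     * blocker registered in whpx_init_vcpu()), so conservatively mark the
     * whole RAM section dirty on every sync.
     */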
    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}

static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,
    .priority = 10,
};

static void whpx_memory_init(void)
{
    memory_listener_register(&whpx_memory_listener, &address_space_memory);
}

static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

/*
 * Partition support
 */

static int whpx_accel_init(MachineState *ms)
{
    struct whpx_state *whpx;
    int ret;
    HRESULT hr;
    WHV_CAPABILITY whpx_cap;
    WHV_PARTITION_PROPERTY prop;

    whpx = &whpx_global;

    memset(whpx, 0, sizeof(struct whpx_state));
    whpx->mem_quota = ms->ram_size;

    hr = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &whpx_cap,
                          sizeof(whpx_cap));
    if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
        error_report("WHPX: No accelerator found, hr=%08lx", hr);
        ret = -ENOSPC;
        goto error;
    }

    hr = WHvCreatePartition(&whpx->partition);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to create partition, hr=%08lx", hr);
        ret = -EINVAL;
        goto error;
    }

    memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
    prop.PropertyCode = WHvPartitionPropertyCodeProcessorCount;
    prop.ProcessorCount = smp_cpus;
    hr = WHvSetPartitionProperty(whpx->partition,
                                 &prop,
                                 sizeof(WHV_PARTITION_PROPERTY));

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set partition core count to %d,"
                     " hr=%08lx", smp_cpus, hr);
        ret = -EINVAL;
        goto error;
    }

    hr = WHvSetupPartition(whpx->partition);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
        ret = -EINVAL;
        goto error;
    }

    whpx_memory_init();

    cpu_interrupt_handler = whpx_handle_interrupt;

    printf("Windows Hypervisor Platform accelerator is operational\n");
    return 0;

error:

    if (NULL != whpx->partition) {
        WHvDeletePartition(whpx->partition);
        whpx->partition = NULL;
    }


    return ret;
}

int whpx_enabled(void)
{
    return whpx_allowed;
}

static void whpx_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "WHPX";
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;
}

static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
};

static void whpx_type_init(void)
{
    type_register_static(&whpx_accel_type);
}

type_init(whpx_type_init);