target-i386/machine.c
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"

#include "cpu.h"
#include "sysemu/kvm.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                                 \
    .name   = (stringify(_field)),                                        \
    .size   = sizeof(SegmentCache),                                       \
    .vmsd   = &vmstate_segment,                                           \
    .flags  = VMS_STRUCT,                                                 \
    .offset = offsetof(_state, _field)                                    \
              + type_check(SegmentCache, typeof_field(_state, _field))    \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                         \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

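/*
 * The vector registers are migrated in 64-bit quadword slices of the
 * XMMReg backing storage: "xmm" covers quadwords 0-1 (bits 0-127),
 * "ymmh" quadwords 2-3 (bits 128-255) and "zmmh" quadwords 4-7
 * (bits 256-511).  On x86_64 the extra ZMM16-ZMM31 registers are
 * migrated whole as "hi16_zmm".
 */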
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),
        VMSTATE_UINT64(XMM_Q(1), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                          \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,      \
                             vmstate_xmm_reg, XMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(XMM_Q(2), XMMReg),
        VMSTATE_UINT64(XMM_Q(3), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)                \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,     \
                             vmstate_ymmh_reg, XMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(XMM_Q(4), XMMReg),
        VMSTATE_UINT64(XMM_Q(5), XMMReg),
        VMSTATE_UINT64(XMM_Q(6), XMMReg),
        VMSTATE_UINT64(XMM_Q(7), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                    \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,      \
                             vmstate_zmmh_reg, XMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),
        VMSTATE_UINT64(XMM_Q(1), XMMReg),
        VMSTATE_UINT64(XMM_Q(2), XMMReg),
        VMSTATE_UINT64(XMM_Q(3), XMMReg),
        VMSTATE_UINT64(XMM_Q(4), XMMReg),
        VMSTATE_UINT64(XMM_Q(5), XMMReg),
        VMSTATE_UINT64(XMM_Q(6), XMMReg),
        VMSTATE_UINT64(XMM_Q(7), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)                \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,      \
                             vmstate_hi16_zmm_reg, XMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                              \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                         \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
{
    fprintf(stderr, "call put_fpreg() with invalid arguments\n");
    exit(0);
}

/* XXX: add that in a FPU generic layer */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

#define MANTD1(fp)  (fp & ((1LL << 52) - 1))
#define EXPBIAS1    1023
#define EXPD1(fp)   ((fp >> 52) & 0x7FF)
#define SIGND1(fp)  ((fp >> 32) & 0x80000000)

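/*
 * Expand an IEEE754 double into the 80-bit extended format used by the
 * x87 registers: the 52-bit mantissa is shifted up by 11 bits and the
 * explicit integer bit (bit 63) is set, the exponent is rebiased from
 * 1023 to 16383, and the sign ends up in bit 15 of the exponent word.
 */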
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}

static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);
    fp_reg->d = cpu_set_fp80(mant, exp);
    return 0;
}

static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;
    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&mant, &exp, fp_reg->d);
    qemu_put_be64s(f, &mant);
    qemu_put_be16s(f, &exp);
}

static const VMStateInfo vmstate_fpreg = {
    .name = "fpreg",
    .get = get_fpreg,
    .put = put_fpreg,
};

static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    p->mant = mant;
    p->exp = 0xffff;
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_mmx = {
    .name = "fpreg_1_mmx",
    .get = get_fpreg_1_mmx,
    .put = put_fpreg_error,
};

static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    fp64_to_fp80(p, mant);
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_no_mmx = {
    .name = "fpreg_1_no_mmx",
    .get = get_fpreg_1_no_mmx,
    .put = put_fpreg_error,
};

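/*
 * Saved x87 registers come in two stream formats.  Format 0, the only
 * format written by cpu_pre_save(), stores each register as an explicit
 * mantissa/exponent pair.  Format 1 stored only a 64-bit value per
 * register; on load we have to guess whether those values are raw MMX
 * contents (all tags valid and TOP == 0) or doubles that must be
 * converted back to the 80-bit format.  The predicates below select the
 * matching field description at load time.
 */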
static bool fpregs_is_0(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->fpregs_format_vmstate == 0);
}

static bool fpregs_is_1_mmx(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (guess_mmx && (env->fpregs_format_vmstate == 1));
}

static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (!guess_mmx && (env->fpregs_format_vmstate == 1));
}

#define VMSTATE_FP_REGS(_field, _state, _n)                               \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)

static bool version_is_5(void *opaque, int version_id)
{
    return version_id == 5;
}

#ifdef TARGET_X86_64
static bool less_than_7(void *opaque, int version_id)
{
    return version_id < 7;
}

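/*
 * Before stream version 7, sysenter_esp/sysenter_eip were migrated as
 * 32-bit values even on x86_64 (see the "Hack" note in vmstate_x86_cpu
 * below); these accessors read and write the 64-bit fields in that
 * truncated form for compatibility with such streams.
 */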
static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
{
    uint64_t *v = pv;
    *v = qemu_get_be32(f);
    return 0;
}

static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
{
    uint64_t *v = pv;
    qemu_put_be32(f, *v);
}

static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
    .name = "uint64_as_uint32",
    .get = get_uint64_as_uint32,
    .put = put_uint64_as_uint32,
};

#define VMSTATE_HACK_UINT32(_f, _s, _t)                                   \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
#endif

static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

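    /*
     * fpus_vmstate is the FPU status word with the top-of-stack index
     * folded into bits 11-13; fptag_vmstate packs one bit per x87
     * register, set when the register is valid (the complement of
     * env->fptags).
     */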
    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * In real mode the guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to hosts with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    /*
     * In real mode the guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from hosts that lack
     * unrestricted guest support to hosts that have it (otherwise the
     * migration will fail with an invalid guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them. */
        target_ulong dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs, 1);

    if (tcg_enabled()) {
        cpu_smm_update(cpu);
    }
    return 0;
}

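/*
 * Everything below is optional state wrapped in migration subsections.
 * A subsection is only put on the wire when its .needed callback returns
 * true, i.e. when the corresponding state differs from its default, so
 * migration to an older QEMU can keep working as long as the guest does
 * not actually use the feature.
 */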
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
                             HV_SYNIC_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

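/*
 * Hyper-V SynIC timers: each of the HV_SYNIC_STIMER_COUNT timers has a
 * config and a count MSR; the subsection is only migrated once the guest
 * has programmed at least one of them.
 */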
static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

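/*
 * The AVX-512 subsection is needed when any opmask register is live, when
 * the upper 256 bits (quadwords 4-7) of ZMM0-ZMM15 are non-zero, or, on
 * x86_64, when any of ZMM16-ZMM31 is non-zero.
 */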
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].XMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

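/*
 * Top-level CPU state.  version_id is the format written on save;
 * minimum_version_id is the oldest stream format that can still be
 * loaded.  Fields tagged with a version number are only present in
 * streams of at least that version.
 */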
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 3,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
        VMSTATE_FP_REGS(env.fpregs, X86CPU, 8),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
#ifdef TARGET_X86_64
        /* Hack: in v7 the size changed from 32 to 64 bits on x86_64 */
        VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7),
        VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7),
        VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7),
        VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7),
#else
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
#endif

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32_V(env.smbase, X86CPU, 4),

        VMSTATE_UINT64_V(env.pat, X86CPU, 5),
        VMSTATE_UINT32_V(env.hflags2, X86CPU, 5),

        VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5),
        VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5),
        VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5),
        VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5),
        VMSTATE_UINT64_V(env.intercept, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5),
        VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5),
        VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
        VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
        VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
        VMSTATE_UINT64_V(env.tsc, X86CPU, 9),
        VMSTATE_INT32_V(env.exception_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11),
        VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11),
        VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11),
        /* MCE */
        VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10),
        VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10),
        /* rdtscp */
        VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11),
        /* KVM pvclock msr */
        VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11),
        VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        NULL
    }
};