/* target/i386/machine.c */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

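/*
 * Each VMStateDescription below tells the migration code how to serialize
 * one piece of CPU state: a stream name, a version, and a NULL-terminated
 * list of fields built from the VMSTATE_* helper macros.
 */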
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache, typeof_field(_state, _field)) \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

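/*
 * The 512-bit ZMM registers migrate in slices, one per CPU generation that
 * introduced it: xmm (bits 0-127), ymmh (bits 128-255), zmmh (bits 256-511)
 * and, on x86_64 only, whole registers 16-31 as hi16_zmm. This keeps the
 * wire format of each slice stable across QEMU versions.
 */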
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                             \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

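/*
 * FPReg holds a host floatx80, which has no stable wire representation.
 * It therefore migrates through x86_FPReg_tmp: cpu_get_fp80() splits the
 * value into a 64-bit mantissa and a 16-bit exponent on save, and
 * cpu_set_fp80() reassembles it on load.
 */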
typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

static void fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

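/*
 * Derive the migration-only FPU fields before saving: fpus_vmstate is FSW
 * with the top-of-stack pointer re-inserted into bits 11-13, and
 * fptag_vmstate packs the eight tag flags into the abridged one-byte FTW
 * format (bit set = register valid).
 */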
static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers should have a DPL of zero.
     * Older KVM versions set it incorrectly.
     * Fixing it here allows live migration to a host with unrestricted
     * guest support (otherwise the migration fails with an invalid guest
     * state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }
}

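/*
 * cpu_post_load() is the mirror of cpu_pre_save(): it rejects streams we
 * cannot restore, unpacks the FPU fields packed above, and re-derives
 * state that QEMU caches outside the migrated fields (the CPL bits of
 * hflags, hardware breakpoints from DR7, and the TLB).
 */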
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers should have a DPL of zero.
     * Older KVM versions set it incorrectly.
     * Fixing it here allows live migration from a host lacking
     * unrestricted guest support to a host that has it (otherwise the
     * migration fails with an invalid guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM. This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them. */
        target_ulong dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);

    if (tcg_enabled()) {
        cpu_smm_update(cpu);
    }
    return 0;
}

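/*
 * Everything below is an optional subsection of vmstate_x86_cpu: it is
 * put on the wire only when its .needed callback returns true, so guests
 * that never touch a feature keep producing streams that older QEMU
 * versions can still accept.
 */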
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

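/*
 * Hyper-V enlightenment MSRs. Each group migrates in its own subsection,
 * gated on the guest actually having written a non-zero value (or, for
 * the runtime MSR, on the feature being enabled for the CPU).
 */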
static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
                             HV_SYNIC_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

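/*
 * Top-level CPU state. version_id 12 is what we emit; minimum_version_id
 * 11 is the oldest incoming stream we accept. Fields carrying an explicit
 * version argument (the _V variants, e.g. VMSTATE_UINT64_V(..., 12)) are
 * only expected in streams at or above that version.
 */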
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted with respect to version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_mcg_ext_ctl,
        NULL
    }
};