/* target/arm/machine.c */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "migration/cpu.h"

static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_VFP);
}

static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
    return 0;
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here. */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
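
/*
 * The "fpscr" entry above is open-coded rather than expressed with a
 * VMSTATE_* macro because the value has no single backing field: it is
 * synthesised by vfp_get_fpscr() on save and written back through
 * vfp_set_fpscr() on load.  With .flags = VMS_SINGLE and .offset = 0 the
 * get/put hooks are simply handed the ARMCPU as 'opaque'.  A minimal
 * sketch of the same pattern for a hypothetical register "foo" follows;
 * all of the foo names are invented purely for illustration.
 */
#if 0   /* illustrative sketch only, not compiled */
static int get_foo(QEMUFile *f, void *opaque, size_t size,
                   VMStateField *field)
{
    ARMCPU *cpu = opaque;

    /* Decode the wire value and push it into CPU state. */
    set_foo(&cpu->env, qemu_get_be32(f));           /* hypothetical setter */
    return 0;
}

static int put_foo(QEMUFile *f, void *opaque, size_t size,
                   VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Recompute the architectural view of the register and send it. */
    qemu_put_be32(f, get_foo_value(&cpu->env));     /* hypothetical getter */
    return 0;
}

static const VMStateInfo vmstate_foo = {
    .name = "foo",
    .get = get_foo,
    .put = put_foo,
};

/* ...and the matching entry in the owning field list: */
{
    .name = "foo",
    .version_id = 0,
    .size = sizeof(uint32_t),
    .info = &vmstate_foo,
    .flags = VMS_SINGLE,
    .offset = 0,
},
#endif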

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two words of each Zreg are stored in VFP state. */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
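
/*
 * Layout note: the low 128 bits of each Z register double as the legacy
 * AdvSIMD Q register and travel in the "cpu/vfp" section above, so only
 * the upper elements of each Zreg are serialised here.  For
 * ARM_MAX_VQ == 16 each predicate register is 2 * ARM_MAX_VQ / 8 == 4
 * doublewords (256 bits), and pregs has 17 entries because the FFR is
 * kept alongside P0-P15.
 */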
#endif /* AARCH64 */

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        NULL
    }
};
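
/*
 * Every optional block of state in this file follows the same pattern:
 * it lives in a subsection whose .needed callback decides at save time
 * whether the data goes on the wire at all.  When the feature is not in
 * use nothing is sent, which is what keeps migration to destinations
 * that predate the subsection working.  A minimal sketch of one more
 * subsection in that style; the "cpu/m/frob" name and the env.v7m.frob
 * field are invented purely for illustration.
 */
#if 0   /* illustrative sketch only, not compiled */
static bool m_frob_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* Only send the state when it is actually live. */
    return arm_feature(env, ARM_FEATURE_M) && env->v7m.frob != 0;
}

static const VMStateDescription vmstate_m_frob = {
    .name = "cpu/m/frob",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_frob_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.frob, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* ...and &vmstate_m_frob would then be added to a .subsections list. */
#endif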

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
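
/*
 * The VMSTATE_VARRAY_UINT32 entries above use cpu->pmsav7_dregion as
 * their element count, so source and destination need to be configured
 * with the same number of MPU regions for the stream to match.  The
 * VMSTATE_VALIDATE entry carries no data of its own: on load it runs
 * pmsav7_rgnr_vmstate_validate() and is intended to fail the migration
 * if the region number already received (via the "cpu/pmsav7-rnr"
 * subsection, which is ordered before this one) is out of range.
 */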

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.h. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

static int get_power(QEMUFile *f, void *opaque, size_t size,
                     VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                     VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}
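
/*
 * A nonzero return from a .put handler is treated as an error by the
 * vmstate code, so the intent here is that attempting to migrate while a
 * CPU is part-way through a power state transition fails the save rather
 * than emitting an inconsistent value.
 */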

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            abort();
        }
    } else {
        if (!write_cpustate_to_list(cpu)) {
            /* This should never fail. */
            abort();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    int i, v;

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming: skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    return 0;
}
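
/*
 * A short worked example of the merge loop above, using made-up register
 * indexes: if our sorted list is {A, B, C} and the incoming list is
 * {A, C}, then A and C are copied over, B keeps its reset value (the
 * "not incoming" branch), and the loop finishes with both cursors at the
 * end.  An index that appears in the incoming list but not in ours hits
 * the "register in their list but not ours" branch and the load fails
 * with -1, as the comment at the top of the function describes.
 */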

const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        &vmstate_serror,
        NULL
    }
};
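
/*
 * Two conventions are worth noting about the top-level description: the
 * version_id / minimum_version_id pair (22 here) is typically bumped only
 * when the mandatory field list changes incompatibly, while new optional
 * state is added as another .needed-gated subsection so that older
 * streams still load; and vmstate_arm_cpu is deliberately non-static
 * because the ARM CPU class code outside this file hooks it up as the
 * CPU's migration description.
 */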