]> git.proxmox.com Git - mirror_qemu.git/blob - target/arm/machine.c
target/arm: Implement v8M MSPLIM and PSPLIM registers
[mirror_qemu.git] / target / arm / machine.c
1 #include "qemu/osdep.h"
2 #include "qemu-common.h"
3 #include "cpu.h"
4 #include "hw/hw.h"
5 #include "hw/boards.h"
6 #include "qemu/error-report.h"
7 #include "sysemu/kvm.h"
8 #include "kvm_arm.h"
9 #include "internals.h"
10 #include "migration/cpu.h"
11
12 static bool vfp_needed(void *opaque)
13 {
14 ARMCPU *cpu = opaque;
15 CPUARMState *env = &cpu->env;
16
17 return arm_feature(env, ARM_FEATURE_VFP);
18 }
19
20 static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
21 VMStateField *field)
22 {
23 ARMCPU *cpu = opaque;
24 CPUARMState *env = &cpu->env;
25 uint32_t val = qemu_get_be32(f);
26
27 vfp_set_fpscr(env, val);
28 return 0;
29 }
30
31 static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
32 VMStateField *field, QJSON *vmdesc)
33 {
34 ARMCPU *cpu = opaque;
35 CPUARMState *env = &cpu->env;
36
37 qemu_put_be32(f, vfp_get_fpscr(env));
38 return 0;
39 }
40
/* Custom VMStateInfo: FPSCR needs get/put accessors rather than a plain
 * field offset, because writing it has architectural side effects.
 */
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};
46
/* Migration subsection for the VFP/Neon register file.
 * The wire format is fixed (version 3); do not reorder fields.
 */
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.
         * (Each Qn register is the low 128 bits -- two uint64_t -- of
         * the corresponding SVE Zn register; the high parts, if any,
         * migrate in the cpu/sve subsection.)
         */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
104
105 static bool iwmmxt_needed(void *opaque)
106 {
107 ARMCPU *cpu = opaque;
108 CPUARMState *env = &cpu->env;
109
110 return arm_feature(env, ARM_FEATURE_IWMMXT);
111 }
112
/* Migration subsection for the iwMMXt data and control registers. */
static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};
124
125 #ifdef TARGET_AARCH64
126 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
127 * and ARMPredicateReg is actively empty. This triggers errors
128 * in the expansion of the VMSTATE macros.
129 */
130
131 static bool sve_needed(void *opaque)
132 {
133 ARMCPU *cpu = opaque;
134 CPUARMState *env = &cpu->env;
135
136 return arm_feature(env, ARM_FEATURE_SVE);
137 }
138
139 /* The first two words of each Zreg is stored in VFP state. */
140 static const VMStateDescription vmstate_zreg_hi_reg = {
141 .name = "cpu/sve/zreg_hi",
142 .version_id = 1,
143 .minimum_version_id = 1,
144 .fields = (VMStateField[]) {
145 VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
146 VMSTATE_END_OF_LIST()
147 }
148 };
149
/* Per-register description for one SVE predicate register. */
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};
159
/* Migration subsection for SVE state: the high parts of the 32 Z
 * registers plus the 17 predicate registers (16 Pn + FFR).
 */
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
173 #endif /* AARCH64 */
174
175 static bool m_needed(void *opaque)
176 {
177 ARMCPU *cpu = opaque;
178 CPUARMState *env = &cpu->env;
179
180 return arm_feature(env, ARM_FEATURE_M);
181 }
182
/* Subsection for the non-secure FAULTMASK/PRIMASK; older QEMU encoded
 * these in the CPSR I/F bits instead (see get_cpsr()).
 */
static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
193
194 /* CSSELR is in a subsection because we didn't implement it previously.
195 * Migration from an old implementation will leave it at zero, which
196 * is OK since the only CPUs in the old implementation make the
197 * register RAZ/WI.
198 * Since there was no version of QEMU which implemented the CSSELR for
199 * just non-secure, we transfer both banks here rather than putting
200 * the secure banked version in the m-security subsection.
201 */
202 static bool csselr_vmstate_validate(void *opaque, int version_id)
203 {
204 ARMCPU *cpu = opaque;
205
206 return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
207 && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
208 }
209
210 static bool m_csselr_needed(void *opaque)
211 {
212 ARMCPU *cpu = opaque;
213
214 return !arm_v7m_csselr_razwi(cpu);
215 }
216
/* Subsection for CSSELR, both banks (see the comment above about why the
 * secure bank is not in the m-security subsection). Validated on load.
 */
static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
228
/* Subsection for the non-secure SCR; the secure bank migrates in the
 * m-security subsection.
 */
static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
238
/* Subsection for the inactive stack pointer (the MSP/PSP not currently
 * in use as r13).
 */
static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
248
249 static bool m_v8m_needed(void *opaque)
250 {
251 ARMCPU *cpu = opaque;
252 CPUARMState *env = &cpu->env;
253
254 return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
255 }
256
/* Subsection for the v8M stack limit registers (MSPLIM/PSPLIM),
 * both security banks.
 */
static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};
268
/* Core M-profile migration state (non-secure bank of the banked
 * registers); later additions live in the subsections below so that
 * migration to older QEMU keeps working when they are not needed.
 */
static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        NULL
    }
};
297
298 static bool thumb2ee_needed(void *opaque)
299 {
300 ARMCPU *cpu = opaque;
301 CPUARMState *env = &cpu->env;
302
303 return arm_feature(env, ARM_FEATURE_THUMB2EE);
304 }
305
/* Migration subsection for the ThumbEE TEECR and TEEHBR registers. */
static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
317
318 static bool pmsav7_needed(void *opaque)
319 {
320 ARMCPU *cpu = opaque;
321 CPUARMState *env = &cpu->env;
322
323 return arm_feature(env, ARM_FEATURE_PMSA) &&
324 arm_feature(env, ARM_FEATURE_V7) &&
325 !arm_feature(env, ARM_FEATURE_V8);
326 }
327
328 static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
329 {
330 ARMCPU *cpu = opaque;
331
332 return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
333 }
334
/* Migration subsection for the v7 PMSA (MPU) region registers. The
 * arrays are variable-length, sized by pmsav7_dregion, and the region
 * number is validated after load (its value arrives via the
 * cpu/pmsav7-rnr subsection, which is ordered before this one).
 */
static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
351
352 static bool pmsav7_rnr_needed(void *opaque)
353 {
354 ARMCPU *cpu = opaque;
355 CPUARMState *env = &cpu->env;
356
357 /* For R profile cores pmsav7.rnr is migrated via the cpreg
358 * "RGNR" definition in helper.h. For M profile we have to
359 * migrate it separately.
360 */
361 return arm_feature(env, ARM_FEATURE_M);
362 }
363
/* Subsection carrying the non-secure MPU region number (M profile only).
 * Must be ordered before cpu/pmsav7 so its VMSTATE_VALIDATE sees the
 * loaded value.
 */
static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
374
375 static bool pmsav8_needed(void *opaque)
376 {
377 ARMCPU *cpu = opaque;
378 CPUARMState *env = &cpu->env;
379
380 return arm_feature(env, ARM_FEATURE_PMSA) &&
381 arm_feature(env, ARM_FEATURE_V8);
382 }
383
/* Migration subsection for the non-secure v8 PMSA (MPU) registers;
 * the secure bank migrates in the m-security subsection.
 */
static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
399
400 static bool s_rnr_vmstate_validate(void *opaque, int version_id)
401 {
402 ARMCPU *cpu = opaque;
403
404 return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
405 }
406
407 static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
408 {
409 ARMCPU *cpu = opaque;
410
411 return cpu->env.sau.rnr < cpu->sau_sregion;
412 }
413
414 static bool m_security_needed(void *opaque)
415 {
416 ARMCPU *cpu = opaque;
417 CPUARMState *env = &cpu->env;
418
419 return arm_feature(env, ARM_FEATURE_M_SECURITY);
420 }
421
/* Migration subsection for the M-profile security extension: the
 * secure banks of the banked v7m/pmsav8 registers, plus the SAU and
 * other secure-only registers.
 */
static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        /* rnr is transferred before its validator runs */
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
465
/* Incoming migration for the "cpsr" field. The one 32-bit wire value
 * carries XPSR (M profile), PSTATE (AArch64) or CPSR (AArch32),
 * disambiguated from the CPU's features and the value's own bits.
 * Includes compatibility handling for streams from older QEMU which
 * sent M-profile state in CPSR format.
 */
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    /* For A profile, decide 64-vs-32-bit from the nRW bit of the value. */
    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}
521
522 static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
523 VMStateField *field, QJSON *vmdesc)
524 {
525 ARMCPU *cpu = opaque;
526 CPUARMState *env = &cpu->env;
527 uint32_t val;
528
529 if (arm_feature(env, ARM_FEATURE_M)) {
530 /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
531 val = xpsr_read(env) & ~XPSR_EXCP;
532 } else if (is_a64(env)) {
533 val = pstate_read(env);
534 } else {
535 val = cpsr_read(env);
536 }
537
538 qemu_put_be32(f, val);
539 return 0;
540 }
541
/* Custom VMStateInfo: the CPSR slot needs get/put accessors since the
 * wire value may be XPSR, PSTATE or CPSR depending on the CPU.
 */
static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};
547
548 static int get_power(QEMUFile *f, void *opaque, size_t size,
549 VMStateField *field)
550 {
551 ARMCPU *cpu = opaque;
552 bool powered_off = qemu_get_byte(f);
553 cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
554 return 0;
555 }
556
557 static int put_power(QEMUFile *f, void *opaque, size_t size,
558 VMStateField *field, QJSON *vmdesc)
559 {
560 ARMCPU *cpu = opaque;
561
562 /* Migration should never happen while we transition power states */
563
564 if (cpu->power_state == PSCI_ON ||
565 cpu->power_state == PSCI_OFF) {
566 bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
567 qemu_put_byte(f, powered_off);
568 return 0;
569 } else {
570 return 1;
571 }
572 }
573
/* Custom VMStateInfo: power state is stored as a bool on the wire but
 * held as a PSCI state enum, so it needs get/put accessors.
 */
static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};
579
580 static int cpu_pre_save(void *opaque)
581 {
582 ARMCPU *cpu = opaque;
583
584 if (kvm_enabled()) {
585 if (!write_kvmstate_to_list(cpu)) {
586 /* This should never fail */
587 abort();
588 }
589 } else {
590 if (!write_cpustate_to_list(cpu)) {
591 /* This should never fail. */
592 abort();
593 }
594 }
595
596 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
597 memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
598 cpu->cpreg_array_len * sizeof(uint64_t));
599 memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
600 cpu->cpreg_array_len * sizeof(uint64_t));
601
602 return 0;
603 }
604
/* After loading: merge the incoming cpreg values into our own list and
 * push them into the CPU (or KVM). Returns 0 on success, -1 to fail
 * the migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    int i, v;

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    /* Two-pointer walk: both index lists are assumed sorted, 'i' walks
     * our registers and 'v' walks the incoming ones.
     */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /* Re-derive debug breakpoint/watchpoint state from the loaded regs. */
    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    return 0;
}
654
/* Top-level migration description for an ARM CPU. Optional and
 * feature-dependent state lives in the subsections; the mandatory
 * fields below are versioned together (bump version_id when changing
 * them).
 */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            /* Custom accessors: XPSR/PSTATE/CPSR depending on CPU mode */
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            /* Custom accessors: PSCI power state <-> one-byte bool */
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        NULL
    }
};