4 * This code is licensed under the GNU GPL v2 or later.
6 * SPDX-License-Identifier: GPL-2.0-or-later
9 #include "qemu/osdep.h"
11 #include "internals.h"
12 #include "exec/helper-proto.h"
13 #include "qemu/main-loop.h"
14 #include "qemu/bitops.h"
16 #include "exec/exec-all.h"
18 #include "exec/cpu_ldst.h"
19 #include "semihosting/common-semi.h"
22 static void v7m_msr_xpsr(CPUARMState
*env
, uint32_t mask
,
23 uint32_t reg
, uint32_t val
)
25 /* Only APSR is actually writable */
27 uint32_t apsrmask
= 0;
30 apsrmask
|= XPSR_NZCV
| XPSR_Q
;
32 if ((mask
& 4) && arm_feature(env
, ARM_FEATURE_THUMB_DSP
)) {
35 xpsr_write(env
, val
, apsrmask
);
39 static uint32_t v7m_mrs_xpsr(CPUARMState
*env
, uint32_t reg
, unsigned el
)
43 if ((reg
& 1) && el
) {
44 mask
|= XPSR_EXCP
; /* IPSR (unpriv. reads as zero) */
47 mask
|= XPSR_NZCV
| XPSR_Q
; /* APSR */
48 if (arm_feature(env
, ARM_FEATURE_THUMB_DSP
)) {
52 /* EPSR reads as zero */
53 return xpsr_read(env
) & mask
;
56 static uint32_t v7m_mrs_control(CPUARMState
*env
, uint32_t secure
)
58 uint32_t value
= env
->v7m
.control
[secure
];
61 /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
62 value
|= env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_FPCA_MASK
;
67 #ifdef CONFIG_USER_ONLY
69 void HELPER(v7m_msr
)(CPUARMState
*env
, uint32_t maskreg
, uint32_t val
)
71 uint32_t mask
= extract32(maskreg
, 8, 4);
72 uint32_t reg
= extract32(maskreg
, 0, 8);
75 case 0 ... 7: /* xPSR sub-fields */
76 v7m_msr_xpsr(env
, mask
, reg
, val
);
78 case 20: /* CONTROL */
79 /* There are no sub-fields that are actually writable from EL0. */
82 /* Unprivileged writes to other registers are ignored */
87 uint32_t HELPER(v7m_mrs
)(CPUARMState
*env
, uint32_t reg
)
90 case 0 ... 7: /* xPSR sub-fields */
91 return v7m_mrs_xpsr(env
, reg
, 0);
92 case 20: /* CONTROL */
93 return v7m_mrs_control(env
, 0);
95 /* Unprivileged reads others as zero. */
100 void HELPER(v7m_bxns
)(CPUARMState
*env
, uint32_t dest
)
102 /* translate.c should never generate calls here in user-only mode */
103 g_assert_not_reached();
106 void HELPER(v7m_blxns
)(CPUARMState
*env
, uint32_t dest
)
108 /* translate.c should never generate calls here in user-only mode */
109 g_assert_not_reached();
112 void HELPER(v7m_preserve_fp_state
)(CPUARMState
*env
)
114 /* translate.c should never generate calls here in user-only mode */
115 g_assert_not_reached();
118 void HELPER(v7m_vlstm
)(CPUARMState
*env
, uint32_t fptr
)
120 /* translate.c should never generate calls here in user-only mode */
121 g_assert_not_reached();
124 void HELPER(v7m_vlldm
)(CPUARMState
*env
, uint32_t fptr
)
126 /* translate.c should never generate calls here in user-only mode */
127 g_assert_not_reached();
130 uint32_t HELPER(v7m_tt
)(CPUARMState
*env
, uint32_t addr
, uint32_t op
)
133 * The TT instructions can be used by unprivileged code, but in
134 * user-only emulation we don't have the MPU.
135 * Luckily since we know we are NonSecure unprivileged (and that in
136 * turn means that the A flag wasn't specified), all the bits in the
137 * register must be zero:
138 * IREGION: 0 because IRVALID is 0
139 * IRVALID: 0 because NS
143 * RW: 0 because unpriv and A flag not set
144 * R: 0 because unpriv and A flag not set
145 * SRVALID: 0 because NS
146 * MRVALID: 0 because unpriv and A flag not set
147 * SREGION: 0 becaus SRVALID is 0
148 * MREGION: 0 because MRVALID is 0
156 * What kind of stack write are we doing? This affects how exceptions
157 * generated during the stacking are treated.
159 typedef enum StackingMode
{
165 static bool v7m_stack_write(ARMCPU
*cpu
, uint32_t addr
, uint32_t value
,
166 ARMMMUIdx mmu_idx
, StackingMode mode
)
168 CPUState
*cs
= CPU(cpu
);
169 CPUARMState
*env
= &cpu
->env
;
171 GetPhysAddrResult res
= {};
172 ARMMMUFaultInfo fi
= {};
173 bool secure
= mmu_idx
& ARM_MMU_IDX_M_S
;
177 if (get_phys_addr(env
, addr
, MMU_DATA_STORE
, mmu_idx
, &res
, &fi
)) {
178 /* MPU/SAU lookup failed */
179 if (fi
.type
== ARMFault_QEMU_SFault
) {
180 if (mode
== STACK_LAZYFP
) {
181 qemu_log_mask(CPU_LOG_INT
,
182 "...SecureFault with SFSR.LSPERR "
183 "during lazy stacking\n");
184 env
->v7m
.sfsr
|= R_V7M_SFSR_LSPERR_MASK
;
186 qemu_log_mask(CPU_LOG_INT
,
187 "...SecureFault with SFSR.AUVIOL "
188 "during stacking\n");
189 env
->v7m
.sfsr
|= R_V7M_SFSR_AUVIOL_MASK
;
191 env
->v7m
.sfsr
|= R_V7M_SFSR_SFARVALID_MASK
;
192 env
->v7m
.sfar
= addr
;
193 exc
= ARMV7M_EXCP_SECURE
;
196 if (mode
== STACK_LAZYFP
) {
197 qemu_log_mask(CPU_LOG_INT
,
198 "...MemManageFault with CFSR.MLSPERR\n");
199 env
->v7m
.cfsr
[secure
] |= R_V7M_CFSR_MLSPERR_MASK
;
201 qemu_log_mask(CPU_LOG_INT
,
202 "...MemManageFault with CFSR.MSTKERR\n");
203 env
->v7m
.cfsr
[secure
] |= R_V7M_CFSR_MSTKERR_MASK
;
205 exc
= ARMV7M_EXCP_MEM
;
210 address_space_stl_le(arm_addressspace(cs
, res
.f
.attrs
), res
.f
.phys_addr
,
211 value
, res
.f
.attrs
, &txres
);
212 if (txres
!= MEMTX_OK
) {
213 /* BusFault trying to write the data */
214 if (mode
== STACK_LAZYFP
) {
215 qemu_log_mask(CPU_LOG_INT
, "...BusFault with BFSR.LSPERR\n");
216 env
->v7m
.cfsr
[M_REG_NS
] |= R_V7M_CFSR_LSPERR_MASK
;
218 qemu_log_mask(CPU_LOG_INT
, "...BusFault with BFSR.STKERR\n");
219 env
->v7m
.cfsr
[M_REG_NS
] |= R_V7M_CFSR_STKERR_MASK
;
221 exc
= ARMV7M_EXCP_BUS
;
229 * By pending the exception at this point we are making
230 * the IMPDEF choice "overridden exceptions pended" (see the
231 * MergeExcInfo() pseudocode). The other choice would be to not
232 * pend them now and then make a choice about which to throw away
233 * later if we have two derived exceptions.
234 * The only case when we must not pend the exception but instead
235 * throw it away is if we are doing the push of the callee registers
236 * and we've already generated a derived exception (this is indicated
237 * by the caller passing STACK_IGNFAULTS). Even in this case we will
238 * still update the fault status registers.
242 armv7m_nvic_set_pending_derived(env
->nvic
, exc
, exc_secure
);
245 armv7m_nvic_set_pending_lazyfp(env
->nvic
, exc
, exc_secure
);
247 case STACK_IGNFAULTS
:
253 static bool v7m_stack_read(ARMCPU
*cpu
, uint32_t *dest
, uint32_t addr
,
256 CPUState
*cs
= CPU(cpu
);
257 CPUARMState
*env
= &cpu
->env
;
259 GetPhysAddrResult res
= {};
260 ARMMMUFaultInfo fi
= {};
261 bool secure
= mmu_idx
& ARM_MMU_IDX_M_S
;
266 if (get_phys_addr(env
, addr
, MMU_DATA_LOAD
, mmu_idx
, &res
, &fi
)) {
267 /* MPU/SAU lookup failed */
268 if (fi
.type
== ARMFault_QEMU_SFault
) {
269 qemu_log_mask(CPU_LOG_INT
,
270 "...SecureFault with SFSR.AUVIOL during unstack\n");
271 env
->v7m
.sfsr
|= R_V7M_SFSR_AUVIOL_MASK
| R_V7M_SFSR_SFARVALID_MASK
;
272 env
->v7m
.sfar
= addr
;
273 exc
= ARMV7M_EXCP_SECURE
;
276 qemu_log_mask(CPU_LOG_INT
,
277 "...MemManageFault with CFSR.MUNSTKERR\n");
278 env
->v7m
.cfsr
[secure
] |= R_V7M_CFSR_MUNSTKERR_MASK
;
279 exc
= ARMV7M_EXCP_MEM
;
285 value
= address_space_ldl(arm_addressspace(cs
, res
.f
.attrs
),
286 res
.f
.phys_addr
, res
.f
.attrs
, &txres
);
287 if (txres
!= MEMTX_OK
) {
288 /* BusFault trying to read the data */
289 qemu_log_mask(CPU_LOG_INT
, "...BusFault with BFSR.UNSTKERR\n");
290 env
->v7m
.cfsr
[M_REG_NS
] |= R_V7M_CFSR_UNSTKERR_MASK
;
291 exc
= ARMV7M_EXCP_BUS
;
301 * By pending the exception at this point we are making
302 * the IMPDEF choice "overridden exceptions pended" (see the
303 * MergeExcInfo() pseudocode). The other choice would be to not
304 * pend them now and then make a choice about which to throw away
305 * later if we have two derived exceptions.
307 armv7m_nvic_set_pending(env
->nvic
, exc
, exc_secure
);
311 void HELPER(v7m_preserve_fp_state
)(CPUARMState
*env
)
314 * Preserve FP state (because LSPACT was set and we are about
315 * to execute an FP instruction). This corresponds to the
316 * PreserveFPState() pseudocode.
317 * We may throw an exception if the stacking fails.
319 ARMCPU
*cpu
= env_archcpu(env
);
320 bool is_secure
= env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_S_MASK
;
321 bool negpri
= !(env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_HFRDY_MASK
);
322 bool is_priv
= !(env
->v7m
.fpccr
[is_secure
] & R_V7M_FPCCR_USER_MASK
);
323 bool splimviol
= env
->v7m
.fpccr
[is_secure
] & R_V7M_FPCCR_SPLIMVIOL_MASK
;
324 uint32_t fpcar
= env
->v7m
.fpcar
[is_secure
];
325 bool stacked_ok
= true;
326 bool ts
= is_secure
&& (env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_TS_MASK
);
329 /* Take the iothread lock as we are going to touch the NVIC */
330 qemu_mutex_lock_iothread();
332 /* Check the background context had access to the FPU */
333 if (!v7m_cpacr_pass(env
, is_secure
, is_priv
)) {
334 armv7m_nvic_set_pending_lazyfp(env
->nvic
, ARMV7M_EXCP_USAGE
, is_secure
);
335 env
->v7m
.cfsr
[is_secure
] |= R_V7M_CFSR_NOCP_MASK
;
337 } else if (!is_secure
&& !extract32(env
->v7m
.nsacr
, 10, 1)) {
338 armv7m_nvic_set_pending_lazyfp(env
->nvic
, ARMV7M_EXCP_USAGE
, M_REG_S
);
339 env
->v7m
.cfsr
[M_REG_S
] |= R_V7M_CFSR_NOCP_MASK
;
343 if (!splimviol
&& stacked_ok
) {
344 /* We only stack if the stack limit wasn't violated */
348 mmu_idx
= arm_v7m_mmu_idx_all(env
, is_secure
, is_priv
, negpri
);
349 for (i
= 0; i
< (ts
? 32 : 16); i
+= 2) {
350 uint64_t dn
= *aa32_vfp_dreg(env
, i
/ 2);
351 uint32_t faddr
= fpcar
+ 4 * i
;
352 uint32_t slo
= extract64(dn
, 0, 32);
353 uint32_t shi
= extract64(dn
, 32, 32);
356 faddr
+= 8; /* skip the slot for the FPSCR/VPR */
358 stacked_ok
= stacked_ok
&&
359 v7m_stack_write(cpu
, faddr
, slo
, mmu_idx
, STACK_LAZYFP
) &&
360 v7m_stack_write(cpu
, faddr
+ 4, shi
, mmu_idx
, STACK_LAZYFP
);
363 stacked_ok
= stacked_ok
&&
364 v7m_stack_write(cpu
, fpcar
+ 0x40,
365 vfp_get_fpscr(env
), mmu_idx
, STACK_LAZYFP
);
366 if (cpu_isar_feature(aa32_mve
, cpu
)) {
367 stacked_ok
= stacked_ok
&&
368 v7m_stack_write(cpu
, fpcar
+ 0x44,
369 env
->v7m
.vpr
, mmu_idx
, STACK_LAZYFP
);
374 * We definitely pended an exception, but it's possible that it
375 * might not be able to be taken now. If its priority permits us
376 * to take it now, then we must not update the LSPACT or FP regs,
377 * but instead jump out to take the exception immediately.
378 * If it's just pending and won't be taken until the current
379 * handler exits, then we do update LSPACT and the FP regs.
381 take_exception
= !stacked_ok
&&
382 armv7m_nvic_can_take_pending_exception(env
->nvic
);
384 qemu_mutex_unlock_iothread();
386 if (take_exception
) {
387 raise_exception_ra(env
, EXCP_LAZYFP
, 0, 1, GETPC());
390 env
->v7m
.fpccr
[is_secure
] &= ~R_V7M_FPCCR_LSPACT_MASK
;
393 /* Clear s0 to s31 and the FPSCR and VPR */
396 for (i
= 0; i
< 32; i
+= 2) {
397 *aa32_vfp_dreg(env
, i
/ 2) = 0;
399 vfp_set_fpscr(env
, 0);
400 if (cpu_isar_feature(aa32_mve
, cpu
)) {
405 * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
411 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
412 * This may change the current stack pointer between Main and Process
413 * stack pointers if it is done for the CONTROL register for the current
416 static void write_v7m_control_spsel_for_secstate(CPUARMState
*env
,
420 bool old_is_psp
= v7m_using_psp(env
);
422 env
->v7m
.control
[secstate
] =
423 deposit32(env
->v7m
.control
[secstate
],
424 R_V7M_CONTROL_SPSEL_SHIFT
,
425 R_V7M_CONTROL_SPSEL_LENGTH
, new_spsel
);
427 if (secstate
== env
->v7m
.secure
) {
428 bool new_is_psp
= v7m_using_psp(env
);
431 if (old_is_psp
!= new_is_psp
) {
432 tmp
= env
->v7m
.other_sp
;
433 env
->v7m
.other_sp
= env
->regs
[13];
440 * Write to v7M CONTROL.SPSEL bit. This may change the current
441 * stack pointer between Main and Process stack pointers.
443 static void write_v7m_control_spsel(CPUARMState
*env
, bool new_spsel
)
445 write_v7m_control_spsel_for_secstate(env
, new_spsel
, env
->v7m
.secure
);
448 void write_v7m_exception(CPUARMState
*env
, uint32_t new_exc
)
451 * Write a new value to v7m.exception, thus transitioning into or out
452 * of Handler mode; this may result in a change of active stack pointer.
454 bool new_is_psp
, old_is_psp
= v7m_using_psp(env
);
457 env
->v7m
.exception
= new_exc
;
459 new_is_psp
= v7m_using_psp(env
);
461 if (old_is_psp
!= new_is_psp
) {
462 tmp
= env
->v7m
.other_sp
;
463 env
->v7m
.other_sp
= env
->regs
[13];
468 /* Switch M profile security state between NS and S */
469 static void switch_v7m_security_state(CPUARMState
*env
, bool new_secstate
)
471 uint32_t new_ss_msp
, new_ss_psp
;
473 if (env
->v7m
.secure
== new_secstate
) {
478 * All the banked state is accessed by looking at env->v7m.secure
479 * except for the stack pointer; rearrange the SP appropriately.
481 new_ss_msp
= env
->v7m
.other_ss_msp
;
482 new_ss_psp
= env
->v7m
.other_ss_psp
;
484 if (v7m_using_psp(env
)) {
485 env
->v7m
.other_ss_psp
= env
->regs
[13];
486 env
->v7m
.other_ss_msp
= env
->v7m
.other_sp
;
488 env
->v7m
.other_ss_msp
= env
->regs
[13];
489 env
->v7m
.other_ss_psp
= env
->v7m
.other_sp
;
492 env
->v7m
.secure
= new_secstate
;
494 if (v7m_using_psp(env
)) {
495 env
->regs
[13] = new_ss_psp
;
496 env
->v7m
.other_sp
= new_ss_msp
;
498 env
->regs
[13] = new_ss_msp
;
499 env
->v7m
.other_sp
= new_ss_psp
;
503 void HELPER(v7m_bxns
)(CPUARMState
*env
, uint32_t dest
)
507 * - if the return value is a magic value, do exception return (like BX)
508 * - otherwise bit 0 of the return value is the target security state
512 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
513 /* Covers FNC_RETURN and EXC_RETURN magic */
514 min_magic
= FNC_RETURN_MIN_MAGIC
;
516 /* EXC_RETURN magic only */
517 min_magic
= EXC_RETURN_MIN_MAGIC
;
520 if (dest
>= min_magic
) {
522 * This is an exception return magic value; put it where
523 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
524 * Note that if we ever add gen_ss_advance() singlestep support to
525 * M profile this should count as an "instruction execution complete"
526 * event (compare gen_bx_excret_final_code()).
528 env
->regs
[15] = dest
& ~1;
529 env
->thumb
= dest
& 1;
530 HELPER(exception_internal
)(env
, EXCP_EXCEPTION_EXIT
);
534 /* translate.c should have made BXNS UNDEF unless we're secure */
535 assert(env
->v7m
.secure
);
538 env
->v7m
.control
[M_REG_S
] &= ~R_V7M_CONTROL_SFPA_MASK
;
540 switch_v7m_security_state(env
, dest
& 1);
542 env
->regs
[15] = dest
& ~1;
543 arm_rebuild_hflags(env
);
546 void HELPER(v7m_blxns
)(CPUARMState
*env
, uint32_t dest
)
550 * - bit 0 of the destination address is the target security state
553 /* At this point regs[15] is the address just after the BLXNS */
554 uint32_t nextinst
= env
->regs
[15] | 1;
555 uint32_t sp
= env
->regs
[13] - 8;
558 /* translate.c will have made BLXNS UNDEF unless we're secure */
559 assert(env
->v7m
.secure
);
563 * Target is Secure, so this is just a normal BLX,
564 * except that the low bit doesn't indicate Thumb/not.
566 env
->regs
[14] = nextinst
;
568 env
->regs
[15] = dest
& ~1;
572 /* Target is non-secure: first push a stack frame */
573 if (!QEMU_IS_ALIGNED(sp
, 8)) {
574 qemu_log_mask(LOG_GUEST_ERROR
,
575 "BLXNS with misaligned SP is UNPREDICTABLE\n");
578 if (sp
< v7m_sp_limit(env
)) {
579 raise_exception(env
, EXCP_STKOF
, 0, 1);
582 saved_psr
= env
->v7m
.exception
;
583 if (env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_SFPA_MASK
) {
584 saved_psr
|= XPSR_SFPA
;
587 /* Note that these stores can throw exceptions on MPU faults */
588 cpu_stl_data_ra(env
, sp
, nextinst
, GETPC());
589 cpu_stl_data_ra(env
, sp
+ 4, saved_psr
, GETPC());
592 env
->regs
[14] = 0xfeffffff;
593 if (arm_v7m_is_handler_mode(env
)) {
595 * Write a dummy value to IPSR, to avoid leaking the current secure
596 * exception number to non-secure code. This is guaranteed not
597 * to cause write_v7m_exception() to actually change stacks.
599 write_v7m_exception(env
, 1);
601 env
->v7m
.control
[M_REG_S
] &= ~R_V7M_CONTROL_SFPA_MASK
;
602 switch_v7m_security_state(env
, 0);
604 env
->regs
[15] = dest
;
605 arm_rebuild_hflags(env
);
608 static uint32_t *get_v7m_sp_ptr(CPUARMState
*env
, bool secure
, bool threadmode
,
612 * Return a pointer to the location where we currently store the
613 * stack pointer for the requested security state and thread mode.
614 * This pointer will become invalid if the CPU state is updated
615 * such that the stack pointers are switched around (eg changing
616 * the SPSEL control bit).
617 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
618 * Unlike that pseudocode, we require the caller to pass us in the
619 * SPSEL control bit value; this is because we also use this
620 * function in handling of pushing of the callee-saves registers
621 * part of the v8M stack frame (pseudocode PushCalleeStack()),
622 * and in the tailchain codepath the SPSEL bit comes from the exception
623 * return magic LR value from the previous exception. The pseudocode
624 * opencodes the stack-selection in PushCalleeStack(), but we prefer
625 * to make this utility function generic enough to do the job.
627 bool want_psp
= threadmode
&& spsel
;
629 if (secure
== env
->v7m
.secure
) {
630 if (want_psp
== v7m_using_psp(env
)) {
631 return &env
->regs
[13];
633 return &env
->v7m
.other_sp
;
637 return &env
->v7m
.other_ss_psp
;
639 return &env
->v7m
.other_ss_msp
;
644 static bool arm_v7m_load_vector(ARMCPU
*cpu
, int exc
, bool targets_secure
,
647 CPUState
*cs
= CPU(cpu
);
648 CPUARMState
*env
= &cpu
->env
;
650 uint32_t addr
= env
->v7m
.vecbase
[targets_secure
] + exc
* 4;
651 uint32_t vector_entry
;
652 MemTxAttrs attrs
= {};
656 qemu_log_mask(CPU_LOG_INT
,
657 "...loading from element %d of %s vector table at 0x%x\n",
658 exc
, targets_secure
? "secure" : "non-secure", addr
);
660 mmu_idx
= arm_v7m_mmu_idx_for_secstate_and_priv(env
, targets_secure
, true);
663 * We don't do a get_phys_addr() here because the rules for vector
664 * loads are special: they always use the default memory map, and
665 * the default memory map permits reads from all addresses.
666 * Since there's no easy way to pass through to pmsav8_mpu_lookup()
667 * that we want this special case which would always say "yes",
668 * we just do the SAU lookup here followed by a direct physical load.
670 attrs
.secure
= targets_secure
;
673 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
674 V8M_SAttributes sattrs
= {};
676 v8m_security_lookup(env
, addr
, MMU_DATA_LOAD
, mmu_idx
,
677 targets_secure
, &sattrs
);
679 attrs
.secure
= false;
680 } else if (!targets_secure
) {
682 * NS access to S memory: the underlying exception which we escalate
683 * to HardFault is SecureFault, which always targets Secure.
690 vector_entry
= address_space_ldl(arm_addressspace(cs
, attrs
), addr
,
692 if (result
!= MEMTX_OK
) {
694 * Underlying exception is BusFault: its target security state
695 * depends on BFHFNMINS.
697 exc_secure
= !(cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
);
700 *pvec
= vector_entry
;
701 qemu_log_mask(CPU_LOG_INT
, "...loaded new PC 0x%x\n", *pvec
);
706 * All vector table fetch fails are reported as HardFault, with
707 * HFSR.VECTTBL and .FORCED set. (FORCED is set because
708 * technically the underlying exception is a SecureFault or BusFault
709 * that is escalated to HardFault.) This is a terminal exception,
710 * so we will either take the HardFault immediately or else enter
711 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
712 * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
713 * secure); otherwise it targets the same security state as the
714 * underlying exception.
715 * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
717 if (!(cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
720 env
->v7m
.hfsr
|= R_V7M_HFSR_VECTTBL_MASK
;
721 if (!arm_feature(env
, ARM_FEATURE_V8_1M
)) {
722 env
->v7m
.hfsr
|= R_V7M_HFSR_FORCED_MASK
;
724 armv7m_nvic_set_pending_derived(env
->nvic
, ARMV7M_EXCP_HARD
, exc_secure
);
728 static uint32_t v7m_integrity_sig(CPUARMState
*env
, uint32_t lr
)
731 * Return the integrity signature value for the callee-saves
732 * stack frame section. @lr is the exception return payload/LR value
733 * whose FType bit forms bit 0 of the signature if FP is present.
735 uint32_t sig
= 0xfefa125a;
737 if (!cpu_isar_feature(aa32_vfp_simd
, env_archcpu(env
))
738 || (lr
& R_V7M_EXCRET_FTYPE_MASK
)) {
744 static bool v7m_push_callee_stack(ARMCPU
*cpu
, uint32_t lr
, bool dotailchain
,
748 * For v8M, push the callee-saves register part of the stack frame.
749 * Compare the v8M pseudocode PushCalleeStack().
750 * In the tailchaining case this may not be the current stack.
752 CPUARMState
*env
= &cpu
->env
;
753 uint32_t *frame_sp_p
;
760 StackingMode smode
= ignore_faults
? STACK_IGNFAULTS
: STACK_NORMAL
;
763 bool mode
= lr
& R_V7M_EXCRET_MODE_MASK
;
764 bool priv
= !(env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_NPRIV_MASK
) ||
767 mmu_idx
= arm_v7m_mmu_idx_for_secstate_and_priv(env
, M_REG_S
, priv
);
768 frame_sp_p
= get_v7m_sp_ptr(env
, M_REG_S
, mode
,
769 lr
& R_V7M_EXCRET_SPSEL_MASK
);
770 want_psp
= mode
&& (lr
& R_V7M_EXCRET_SPSEL_MASK
);
772 limit
= env
->v7m
.psplim
[M_REG_S
];
774 limit
= env
->v7m
.msplim
[M_REG_S
];
777 mmu_idx
= arm_mmu_idx(env
);
778 frame_sp_p
= &env
->regs
[13];
779 limit
= v7m_sp_limit(env
);
782 frameptr
= *frame_sp_p
- 0x28;
783 if (frameptr
< limit
) {
785 * Stack limit failure: set SP to the limit value, and generate
786 * STKOF UsageFault. Stack pushes below the limit must not be
787 * performed. It is IMPDEF whether pushes above the limit are
788 * performed; we choose not to.
790 qemu_log_mask(CPU_LOG_INT
,
791 "...STKOF during callee-saves register stacking\n");
792 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_STKOF_MASK
;
793 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
800 * Write as much of the stack frame as we can. A write failure may
801 * cause us to pend a derived exception.
803 sig
= v7m_integrity_sig(env
, lr
);
805 v7m_stack_write(cpu
, frameptr
, sig
, mmu_idx
, smode
) &&
806 v7m_stack_write(cpu
, frameptr
+ 0x8, env
->regs
[4], mmu_idx
, smode
) &&
807 v7m_stack_write(cpu
, frameptr
+ 0xc, env
->regs
[5], mmu_idx
, smode
) &&
808 v7m_stack_write(cpu
, frameptr
+ 0x10, env
->regs
[6], mmu_idx
, smode
) &&
809 v7m_stack_write(cpu
, frameptr
+ 0x14, env
->regs
[7], mmu_idx
, smode
) &&
810 v7m_stack_write(cpu
, frameptr
+ 0x18, env
->regs
[8], mmu_idx
, smode
) &&
811 v7m_stack_write(cpu
, frameptr
+ 0x1c, env
->regs
[9], mmu_idx
, smode
) &&
812 v7m_stack_write(cpu
, frameptr
+ 0x20, env
->regs
[10], mmu_idx
, smode
) &&
813 v7m_stack_write(cpu
, frameptr
+ 0x24, env
->regs
[11], mmu_idx
, smode
);
815 /* Update SP regardless of whether any of the stack accesses failed. */
816 *frame_sp_p
= frameptr
;
821 static void v7m_exception_taken(ARMCPU
*cpu
, uint32_t lr
, bool dotailchain
,
822 bool ignore_stackfaults
)
825 * Do the "take the exception" parts of exception entry,
826 * but not the pushing of state to the stack. This is
827 * similar to the pseudocode ExceptionTaken() function.
829 CPUARMState
*env
= &cpu
->env
;
833 bool push_failed
= false;
835 armv7m_nvic_get_pending_irq_info(env
->nvic
, &exc
, &targets_secure
);
836 qemu_log_mask(CPU_LOG_INT
, "...taking pending %s exception %d\n",
837 targets_secure
? "secure" : "nonsecure", exc
);
840 /* Sanitize LR FType and PREFIX bits */
841 if (!cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
842 lr
|= R_V7M_EXCRET_FTYPE_MASK
;
844 lr
= deposit32(lr
, 24, 8, 0xff);
847 if (arm_feature(env
, ARM_FEATURE_V8
)) {
848 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
849 (lr
& R_V7M_EXCRET_S_MASK
)) {
851 * The background code (the owner of the registers in the
852 * exception frame) is Secure. This means it may either already
853 * have or now needs to push callee-saves registers.
855 if (targets_secure
) {
856 if (dotailchain
&& !(lr
& R_V7M_EXCRET_ES_MASK
)) {
858 * We took an exception from Secure to NonSecure
859 * (which means the callee-saved registers got stacked)
860 * and are now tailchaining to a Secure exception.
861 * Clear DCRS so eventual return from this Secure
862 * exception unstacks the callee-saved registers.
864 lr
&= ~R_V7M_EXCRET_DCRS_MASK
;
868 * We're going to a non-secure exception; push the
869 * callee-saves registers to the stack now, if they're
872 if (lr
& R_V7M_EXCRET_DCRS_MASK
&&
873 !(dotailchain
&& !(lr
& R_V7M_EXCRET_ES_MASK
))) {
874 push_failed
= v7m_push_callee_stack(cpu
, lr
, dotailchain
,
877 lr
|= R_V7M_EXCRET_DCRS_MASK
;
881 lr
&= ~R_V7M_EXCRET_ES_MASK
;
882 if (targets_secure
|| !arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
883 lr
|= R_V7M_EXCRET_ES_MASK
;
885 lr
&= ~R_V7M_EXCRET_SPSEL_MASK
;
886 if (env
->v7m
.control
[targets_secure
] & R_V7M_CONTROL_SPSEL_MASK
) {
887 lr
|= R_V7M_EXCRET_SPSEL_MASK
;
891 * Clear registers if necessary to prevent non-secure exception
892 * code being able to see register values from secure code.
893 * Where register values become architecturally UNKNOWN we leave
894 * them with their previous values. v8.1M is tighter than v8.0M
895 * here and always zeroes the caller-saved registers regardless
896 * of the security state the exception is targeting.
898 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
899 if (!targets_secure
|| arm_feature(env
, ARM_FEATURE_V8_1M
)) {
901 * Always clear the caller-saved registers (they have been
902 * pushed to the stack earlier in v7m_push_stack()).
903 * Clear callee-saved registers if the background code is
904 * Secure (in which case these regs were saved in
905 * v7m_push_callee_stack()).
909 * r4..r11 are callee-saves, zero only if background
910 * state was Secure (EXCRET.S == 1) and exception
911 * targets Non-secure state
913 bool zero_callee_saves
= !targets_secure
&&
914 (lr
& R_V7M_EXCRET_S_MASK
);
916 for (i
= 0; i
< 13; i
++) {
917 if (i
< 4 || i
> 11 || zero_callee_saves
) {
922 xpsr_write(env
, 0, XPSR_NZCV
| XPSR_Q
| XPSR_GE
| XPSR_IT
);
927 if (push_failed
&& !ignore_stackfaults
) {
929 * Derived exception on callee-saves register stacking:
930 * we might now want to take a different exception which
931 * targets a different security state, so try again from the top.
933 qemu_log_mask(CPU_LOG_INT
,
934 "...derived exception on callee-saves register stacking");
935 v7m_exception_taken(cpu
, lr
, true, true);
939 if (!arm_v7m_load_vector(cpu
, exc
, targets_secure
, &addr
)) {
940 /* Vector load failed: derived exception */
941 qemu_log_mask(CPU_LOG_INT
, "...derived exception on vector table load");
942 v7m_exception_taken(cpu
, lr
, true, true);
947 * Now we've done everything that might cause a derived exception
948 * we can go ahead and activate whichever exception we're going to
949 * take (which might now be the derived exception).
951 armv7m_nvic_acknowledge_irq(env
->nvic
);
953 /* Switch to target security state -- must do this before writing SPSEL */
954 switch_v7m_security_state(env
, targets_secure
);
955 write_v7m_control_spsel(env
, 0);
956 arm_clear_exclusive(env
);
957 /* Clear SFPA and FPCA (has no effect if no FPU) */
958 env
->v7m
.control
[M_REG_S
] &=
959 ~(R_V7M_CONTROL_FPCA_MASK
| R_V7M_CONTROL_SFPA_MASK
);
961 env
->condexec_bits
= 0;
963 env
->regs
[15] = addr
& 0xfffffffe;
964 env
->thumb
= addr
& 1;
965 arm_rebuild_hflags(env
);
968 static void v7m_update_fpccr(CPUARMState
*env
, uint32_t frameptr
,
972 * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
973 * that we will need later in order to do lazy FP reg stacking.
975 bool is_secure
= env
->v7m
.secure
;
976 void *nvic
= env
->nvic
;
978 * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
979 * are banked and we want to update the bit in the bank for the
980 * current security state; and in one case we want to specifically
981 * update the NS banked version of a bit even if we are secure.
983 uint32_t *fpccr_s
= &env
->v7m
.fpccr
[M_REG_S
];
984 uint32_t *fpccr_ns
= &env
->v7m
.fpccr
[M_REG_NS
];
985 uint32_t *fpccr
= &env
->v7m
.fpccr
[is_secure
];
986 bool hfrdy
, bfrdy
, mmrdy
, ns_ufrdy
, s_ufrdy
, sfrdy
, monrdy
;
988 env
->v7m
.fpcar
[is_secure
] = frameptr
& ~0x7;
990 if (apply_splim
&& arm_feature(env
, ARM_FEATURE_V8
)) {
992 uint32_t splim
= v7m_sp_limit(env
);
993 bool ign
= armv7m_nvic_neg_prio_requested(nvic
, is_secure
) &&
994 (env
->v7m
.ccr
[is_secure
] & R_V7M_CCR_STKOFHFNMIGN_MASK
);
996 splimviol
= !ign
&& frameptr
< splim
;
997 *fpccr
= FIELD_DP32(*fpccr
, V7M_FPCCR
, SPLIMVIOL
, splimviol
);
1000 *fpccr
= FIELD_DP32(*fpccr
, V7M_FPCCR
, LSPACT
, 1);
1002 *fpccr_s
= FIELD_DP32(*fpccr_s
, V7M_FPCCR
, S
, is_secure
);
1004 *fpccr
= FIELD_DP32(*fpccr
, V7M_FPCCR
, USER
, arm_current_el(env
) == 0);
1006 *fpccr
= FIELD_DP32(*fpccr
, V7M_FPCCR
, THREAD
,
1007 !arm_v7m_is_handler_mode(env
));
1009 hfrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_HARD
, false);
1010 *fpccr_s
= FIELD_DP32(*fpccr_s
, V7M_FPCCR
, HFRDY
, hfrdy
);
1012 bfrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_BUS
, false);
1013 *fpccr_s
= FIELD_DP32(*fpccr_s
, V7M_FPCCR
, BFRDY
, bfrdy
);
1015 mmrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_MEM
, is_secure
);
1016 *fpccr
= FIELD_DP32(*fpccr
, V7M_FPCCR
, MMRDY
, mmrdy
);
1018 ns_ufrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_USAGE
, false);
1019 *fpccr_ns
= FIELD_DP32(*fpccr_ns
, V7M_FPCCR
, UFRDY
, ns_ufrdy
);
1021 monrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_DEBUG
, false);
1022 *fpccr_s
= FIELD_DP32(*fpccr_s
, V7M_FPCCR
, MONRDY
, monrdy
);
1024 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1025 s_ufrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_USAGE
, true);
1026 *fpccr_s
= FIELD_DP32(*fpccr_s
, V7M_FPCCR
, UFRDY
, s_ufrdy
);
1028 sfrdy
= armv7m_nvic_get_ready_status(nvic
, ARMV7M_EXCP_SECURE
, false);
1029 *fpccr_s
= FIELD_DP32(*fpccr_s
, V7M_FPCCR
, SFRDY
, sfrdy
);
1033 void HELPER(v7m_vlstm
)(CPUARMState
*env
, uint32_t fptr
)
1035 /* fptr is the value of Rn, the frame pointer we store the FP regs to */
1036 ARMCPU
*cpu
= env_archcpu(env
);
1037 bool s
= env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_S_MASK
;
1038 bool lspact
= env
->v7m
.fpccr
[s
] & R_V7M_FPCCR_LSPACT_MASK
;
1039 uintptr_t ra
= GETPC();
1041 assert(env
->v7m
.secure
);
1043 if (!(env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_SFPA_MASK
)) {
1047 /* Check access to the coprocessor is permitted */
1048 if (!v7m_cpacr_pass(env
, true, arm_current_el(env
) != 0)) {
1049 raise_exception_ra(env
, EXCP_NOCP
, 0, 1, GETPC());
1053 /* LSPACT should not be active when there is active FP state */
1054 raise_exception_ra(env
, EXCP_LSERR
, 0, 1, GETPC());
1058 raise_exception_ra(env
, EXCP_UNALIGNED
, 0, 1, GETPC());
1062 * Note that we do not use v7m_stack_write() here, because the
1063 * accesses should not set the FSR bits for stacking errors if they
1064 * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
1065 * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
1068 if (!(env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_LSPEN_MASK
)) {
1069 bool ts
= env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_TS_MASK
;
1072 for (i
= 0; i
< (ts
? 32 : 16); i
+= 2) {
1073 uint64_t dn
= *aa32_vfp_dreg(env
, i
/ 2);
1074 uint32_t faddr
= fptr
+ 4 * i
;
1075 uint32_t slo
= extract64(dn
, 0, 32);
1076 uint32_t shi
= extract64(dn
, 32, 32);
1079 faddr
+= 8; /* skip the slot for the FPSCR */
1081 cpu_stl_data_ra(env
, faddr
, slo
, ra
);
1082 cpu_stl_data_ra(env
, faddr
+ 4, shi
, ra
);
1084 cpu_stl_data_ra(env
, fptr
+ 0x40, vfp_get_fpscr(env
), ra
);
1085 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1086 cpu_stl_data_ra(env
, fptr
+ 0x44, env
->v7m
.vpr
, ra
);
1090 * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
1091 * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1094 for (i
= 0; i
< 32; i
+= 2) {
1095 *aa32_vfp_dreg(env
, i
/ 2) = 0;
1097 vfp_set_fpscr(env
, 0);
1098 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1103 v7m_update_fpccr(env
, fptr
, false);
1106 env
->v7m
.control
[M_REG_S
] &= ~R_V7M_CONTROL_FPCA_MASK
;
1109 void HELPER(v7m_vlldm
)(CPUARMState
*env
, uint32_t fptr
)
1111 ARMCPU
*cpu
= env_archcpu(env
);
1112 uintptr_t ra
= GETPC();
1114 /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1115 assert(env
->v7m
.secure
);
1117 if (!(env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_SFPA_MASK
)) {
1121 /* Check access to the coprocessor is permitted */
1122 if (!v7m_cpacr_pass(env
, true, arm_current_el(env
) != 0)) {
1123 raise_exception_ra(env
, EXCP_NOCP
, 0, 1, GETPC());
1126 if (env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_LSPACT_MASK
) {
1127 /* State in FP is still valid */
1128 env
->v7m
.fpccr
[M_REG_S
] &= ~R_V7M_FPCCR_LSPACT_MASK
;
1130 bool ts
= env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_TS_MASK
;
1135 raise_exception_ra(env
, EXCP_UNALIGNED
, 0, 1, GETPC());
1138 for (i
= 0; i
< (ts
? 32 : 16); i
+= 2) {
1141 uint32_t faddr
= fptr
+ 4 * i
;
1144 faddr
+= 8; /* skip the slot for the FPSCR and VPR */
1147 slo
= cpu_ldl_data_ra(env
, faddr
, ra
);
1148 shi
= cpu_ldl_data_ra(env
, faddr
+ 4, ra
);
1150 dn
= (uint64_t) shi
<< 32 | slo
;
1151 *aa32_vfp_dreg(env
, i
/ 2) = dn
;
1153 fpscr
= cpu_ldl_data_ra(env
, fptr
+ 0x40, ra
);
1154 vfp_set_fpscr(env
, fpscr
);
1155 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1156 env
->v7m
.vpr
= cpu_ldl_data_ra(env
, fptr
+ 0x44, ra
);
1160 env
->v7m
.control
[M_REG_S
] |= R_V7M_CONTROL_FPCA_MASK
;
1163 static bool v7m_push_stack(ARMCPU
*cpu
)
1166 * Do the "set up stack frame" part of exception entry,
1167 * similar to pseudocode PushStack().
1168 * Return true if we generate a derived exception (and so
1169 * should ignore further stack faults trying to process
1170 * that derived exception.)
1172 bool stacked_ok
= true, limitviol
= false;
1173 CPUARMState
*env
= &cpu
->env
;
1174 uint32_t xpsr
= xpsr_read(env
);
1175 uint32_t frameptr
= env
->regs
[13];
1176 ARMMMUIdx mmu_idx
= arm_mmu_idx(env
);
1178 bool nsacr_cp10
= extract32(env
->v7m
.nsacr
, 10, 1);
1180 if ((env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_FPCA_MASK
) &&
1181 (env
->v7m
.secure
|| nsacr_cp10
)) {
1182 if (env
->v7m
.secure
&&
1183 env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_TS_MASK
) {
1192 /* Align stack pointer if the guest wants that */
1193 if ((frameptr
& 4) &&
1194 (env
->v7m
.ccr
[env
->v7m
.secure
] & R_V7M_CCR_STKALIGN_MASK
)) {
1196 xpsr
|= XPSR_SPREALIGN
;
1200 if (env
->v7m
.secure
&&
1201 (env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_SFPA_MASK
)) {
1205 frameptr
-= framesize
;
1207 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1208 uint32_t limit
= v7m_sp_limit(env
);
1210 if (frameptr
< limit
) {
1212 * Stack limit failure: set SP to the limit value, and generate
1213 * STKOF UsageFault. Stack pushes below the limit must not be
1214 * performed. It is IMPDEF whether pushes above the limit are
1215 * performed; we choose not to.
1217 qemu_log_mask(CPU_LOG_INT
,
1218 "...STKOF during stacking\n");
1219 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_STKOF_MASK
;
1220 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
1222 env
->regs
[13] = limit
;
1224 * We won't try to perform any further memory accesses but
1225 * we must continue through the following code to check for
1226 * permission faults during FPU state preservation, and we
1227 * must update FPCCR if lazy stacking is enabled.
1235 * Write as much of the stack frame as we can. If we fail a stack
1236 * write this will result in a derived exception being pended
1237 * (which may be taken in preference to the one we started with
1238 * if it has higher priority).
1240 stacked_ok
= stacked_ok
&&
1241 v7m_stack_write(cpu
, frameptr
, env
->regs
[0], mmu_idx
, STACK_NORMAL
) &&
1242 v7m_stack_write(cpu
, frameptr
+ 4, env
->regs
[1],
1243 mmu_idx
, STACK_NORMAL
) &&
1244 v7m_stack_write(cpu
, frameptr
+ 8, env
->regs
[2],
1245 mmu_idx
, STACK_NORMAL
) &&
1246 v7m_stack_write(cpu
, frameptr
+ 12, env
->regs
[3],
1247 mmu_idx
, STACK_NORMAL
) &&
1248 v7m_stack_write(cpu
, frameptr
+ 16, env
->regs
[12],
1249 mmu_idx
, STACK_NORMAL
) &&
1250 v7m_stack_write(cpu
, frameptr
+ 20, env
->regs
[14],
1251 mmu_idx
, STACK_NORMAL
) &&
1252 v7m_stack_write(cpu
, frameptr
+ 24, env
->regs
[15],
1253 mmu_idx
, STACK_NORMAL
) &&
1254 v7m_stack_write(cpu
, frameptr
+ 28, xpsr
, mmu_idx
, STACK_NORMAL
);
1256 if (env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_FPCA_MASK
) {
1257 /* FPU is active, try to save its registers */
1258 bool fpccr_s
= env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_S_MASK
;
1259 bool lspact
= env
->v7m
.fpccr
[fpccr_s
] & R_V7M_FPCCR_LSPACT_MASK
;
1261 if (lspact
&& arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1262 qemu_log_mask(CPU_LOG_INT
,
1263 "...SecureFault because LSPACT and FPCA both set\n");
1264 env
->v7m
.sfsr
|= R_V7M_SFSR_LSERR_MASK
;
1265 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
1266 } else if (!env
->v7m
.secure
&& !nsacr_cp10
) {
1267 qemu_log_mask(CPU_LOG_INT
,
1268 "...Secure UsageFault with CFSR.NOCP because "
1269 "NSACR.CP10 prevents stacking FP regs\n");
1270 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, M_REG_S
);
1271 env
->v7m
.cfsr
[M_REG_S
] |= R_V7M_CFSR_NOCP_MASK
;
1273 if (!(env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_LSPEN_MASK
)) {
1274 /* Lazy stacking disabled, save registers now */
1276 bool cpacr_pass
= v7m_cpacr_pass(env
, env
->v7m
.secure
,
1277 arm_current_el(env
) != 0);
1279 if (stacked_ok
&& !cpacr_pass
) {
1281 * Take UsageFault if CPACR forbids access. The pseudocode
1282 * here does a full CheckCPEnabled() but we know the NSACR
1283 * check can never fail as we have already handled that.
1285 qemu_log_mask(CPU_LOG_INT
,
1286 "...UsageFault with CFSR.NOCP because "
1287 "CPACR.CP10 prevents stacking FP regs\n");
1288 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
1290 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_NOCP_MASK
;
1294 for (i
= 0; i
< ((framesize
== 0xa8) ? 32 : 16); i
+= 2) {
1295 uint64_t dn
= *aa32_vfp_dreg(env
, i
/ 2);
1296 uint32_t faddr
= frameptr
+ 0x20 + 4 * i
;
1297 uint32_t slo
= extract64(dn
, 0, 32);
1298 uint32_t shi
= extract64(dn
, 32, 32);
1301 faddr
+= 8; /* skip the slot for the FPSCR and VPR */
1303 stacked_ok
= stacked_ok
&&
1304 v7m_stack_write(cpu
, faddr
, slo
,
1305 mmu_idx
, STACK_NORMAL
) &&
1306 v7m_stack_write(cpu
, faddr
+ 4, shi
,
1307 mmu_idx
, STACK_NORMAL
);
1309 stacked_ok
= stacked_ok
&&
1310 v7m_stack_write(cpu
, frameptr
+ 0x60,
1311 vfp_get_fpscr(env
), mmu_idx
, STACK_NORMAL
);
1312 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1313 stacked_ok
= stacked_ok
&&
1314 v7m_stack_write(cpu
, frameptr
+ 0x64,
1315 env
->v7m
.vpr
, mmu_idx
, STACK_NORMAL
);
1318 for (i
= 0; i
< ((framesize
== 0xa8) ? 32 : 16); i
+= 2) {
1319 *aa32_vfp_dreg(env
, i
/ 2) = 0;
1321 vfp_set_fpscr(env
, 0);
1322 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1327 /* Lazy stacking enabled, save necessary info to stack later */
1328 v7m_update_fpccr(env
, frameptr
+ 0x20, true);
1334 * If we broke a stack limit then SP was already updated earlier;
1335 * otherwise we update SP regardless of whether any of the stack
1336 * accesses failed or we took some other kind of fault.
1339 env
->regs
[13] = frameptr
;
1345 static void do_v7m_exception_exit(ARMCPU
*cpu
)
1347 CPUARMState
*env
= &cpu
->env
;
1349 uint32_t xpsr
, xpsr_mask
;
1350 bool ufault
= false;
1351 bool sfault
= false;
1352 bool return_to_sp_process
;
1353 bool return_to_handler
;
1354 bool rettobase
= false;
1355 bool exc_secure
= false;
1356 bool return_to_secure
;
1358 bool restore_s16_s31
= false;
1361 * If we're not in Handler mode then jumps to magic exception-exit
1362 * addresses don't have magic behaviour. However for the v8M
1363 * security extensions the magic secure-function-return has to
1364 * work in thread mode too, so to avoid doing an extra check in
1365 * the generated code we allow exception-exit magic to also cause the
1366 * internal exception and bring us here in thread mode. Correct code
1367 * will never try to do this (the following insn fetch will always
1368 * fault) so we the overhead of having taken an unnecessary exception
1371 if (!arm_v7m_is_handler_mode(env
)) {
1376 * In the spec pseudocode ExceptionReturn() is called directly
1377 * from BXWritePC() and gets the full target PC value including
1378 * bit zero. In QEMU's implementation we treat it as a normal
1379 * jump-to-register (which is then caught later on), and so split
1380 * the target value up between env->regs[15] and env->thumb in
1381 * gen_bx(). Reconstitute it.
1383 excret
= env
->regs
[15];
1388 qemu_log_mask(CPU_LOG_INT
, "Exception return: magic PC %" PRIx32
1389 " previous exception %d\n",
1390 excret
, env
->v7m
.exception
);
1392 if ((excret
& R_V7M_EXCRET_RES1_MASK
) != R_V7M_EXCRET_RES1_MASK
) {
1393 qemu_log_mask(LOG_GUEST_ERROR
, "M profile: zero high bits in exception "
1394 "exit PC value 0x%" PRIx32
" are UNPREDICTABLE\n",
1398 ftype
= excret
& R_V7M_EXCRET_FTYPE_MASK
;
1400 if (!ftype
&& !cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1401 qemu_log_mask(LOG_GUEST_ERROR
, "M profile: zero FTYPE in exception "
1402 "exit PC value 0x%" PRIx32
" is UNPREDICTABLE "
1403 "if FPU not present\n",
1408 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1410 * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1411 * we pick which FAULTMASK to clear.
1413 if (!env
->v7m
.secure
&&
1414 ((excret
& R_V7M_EXCRET_ES_MASK
) ||
1415 !(excret
& R_V7M_EXCRET_DCRS_MASK
))) {
1417 /* For all other purposes, treat ES as 0 (R_HXSR) */
1418 excret
&= ~R_V7M_EXCRET_ES_MASK
;
1420 exc_secure
= excret
& R_V7M_EXCRET_ES_MASK
;
1423 if (env
->v7m
.exception
!= ARMV7M_EXCP_NMI
) {
1425 * Auto-clear FAULTMASK on return from other than NMI.
1426 * If the security extension is implemented then this only
1427 * happens if the raw execution priority is >= 0; the
1428 * value of the ES bit in the exception return value indicates
1429 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1431 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1432 if (armv7m_nvic_raw_execution_priority(env
->nvic
) >= 0) {
1433 env
->v7m
.faultmask
[exc_secure
] = 0;
1436 env
->v7m
.faultmask
[M_REG_NS
] = 0;
1440 switch (armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
,
1443 /* attempt to exit an exception that isn't active */
1447 /* still an irq active now */
1451 * We returned to base exception level, no nesting.
1452 * (In the pseudocode this is written using "NestedActivation != 1"
1453 * where we have 'rettobase == false'.)
1458 g_assert_not_reached();
1461 return_to_handler
= !(excret
& R_V7M_EXCRET_MODE_MASK
);
1462 return_to_sp_process
= excret
& R_V7M_EXCRET_SPSEL_MASK
;
1463 return_to_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
1464 (excret
& R_V7M_EXCRET_S_MASK
);
1466 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1467 if (!arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
1469 * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1470 * we choose to take the UsageFault.
1472 if ((excret
& R_V7M_EXCRET_S_MASK
) ||
1473 (excret
& R_V7M_EXCRET_ES_MASK
) ||
1474 !(excret
& R_V7M_EXCRET_DCRS_MASK
)) {
1478 if (excret
& R_V7M_EXCRET_RES0_MASK
) {
1482 /* For v7M we only recognize certain combinations of the low bits */
1483 switch (excret
& 0xf) {
1484 case 1: /* Return to Handler */
1486 case 13: /* Return to Thread using Process stack */
1487 case 9: /* Return to Thread using Main stack */
1489 * We only need to check NONBASETHRDENA for v7M, because in
1490 * v8M this bit does not exist (it is RES1).
1493 !(env
->v7m
.ccr
[env
->v7m
.secure
] &
1494 R_V7M_CCR_NONBASETHRDENA_MASK
)) {
1504 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1505 * Handler mode (and will be until we write the new XPSR.Interrupt
1506 * field) this does not switch around the current stack pointer.
1507 * We must do this before we do any kind of tailchaining, including
1508 * for the derived exceptions on integrity check failures, or we will
1509 * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1511 write_v7m_control_spsel_for_secstate(env
, return_to_sp_process
, exc_secure
);
1514 * Clear scratch FP values left in caller saved registers; this
1515 * must happen before any kind of tail chaining.
1517 if ((env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_CLRONRET_MASK
) &&
1518 (env
->v7m
.control
[M_REG_S
] & R_V7M_CONTROL_FPCA_MASK
)) {
1519 if (env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_LSPACT_MASK
) {
1520 env
->v7m
.sfsr
|= R_V7M_SFSR_LSERR_MASK
;
1521 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
1522 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
1523 "stackframe: error during lazy state deactivation\n");
1524 v7m_exception_taken(cpu
, excret
, true, false);
1527 if (arm_feature(env
, ARM_FEATURE_V8_1M
)) {
1528 /* v8.1M adds this NOCP check */
1529 bool nsacr_pass
= exc_secure
||
1530 extract32(env
->v7m
.nsacr
, 10, 1);
1531 bool cpacr_pass
= v7m_cpacr_pass(env
, exc_secure
, true);
1533 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, true);
1534 env
->v7m
.cfsr
[M_REG_S
] |= R_V7M_CFSR_NOCP_MASK
;
1535 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
1536 "stackframe: NSACR prevents clearing FPU registers\n");
1537 v7m_exception_taken(cpu
, excret
, true, false);
1539 } else if (!cpacr_pass
) {
1540 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
1542 env
->v7m
.cfsr
[exc_secure
] |= R_V7M_CFSR_NOCP_MASK
;
1543 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
1544 "stackframe: CPACR prevents clearing FPU registers\n");
1545 v7m_exception_taken(cpu
, excret
, true, false);
1549 /* Clear s0..s15, FPSCR and VPR */
1552 for (i
= 0; i
< 16; i
+= 2) {
1553 *aa32_vfp_dreg(env
, i
/ 2) = 0;
1555 vfp_set_fpscr(env
, 0);
1556 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1563 env
->v7m
.sfsr
|= R_V7M_SFSR_INVER_MASK
;
1564 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
1565 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
1566 "stackframe: failed EXC_RETURN.ES validity check\n");
1567 v7m_exception_taken(cpu
, excret
, true, false);
1573 * Bad exception return: instead of popping the exception
1574 * stack, directly take a usage fault on the current stack.
1576 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
1577 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, env
->v7m
.secure
);
1578 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
1579 "stackframe: failed exception return integrity check\n");
1580 v7m_exception_taken(cpu
, excret
, true, false);
1585 * Tailchaining: if there is currently a pending exception that
1586 * is high enough priority to preempt execution at the level we're
1587 * about to return to, then just directly take that exception now,
1588 * avoiding an unstack-and-then-stack. Note that now we have
1589 * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1590 * our current execution priority is already the execution priority we are
1591 * returning to -- none of the state we would unstack or set based on
1592 * the EXCRET value affects it.
1594 if (armv7m_nvic_can_take_pending_exception(env
->nvic
)) {
1595 qemu_log_mask(CPU_LOG_INT
, "...tailchaining to pending exception\n");
1596 v7m_exception_taken(cpu
, excret
, true, false);
1600 switch_v7m_security_state(env
, return_to_secure
);
1604 * The stack pointer we should be reading the exception frame from
1605 * depends on bits in the magic exception return type value (and
1606 * for v8M isn't necessarily the stack pointer we will eventually
1607 * end up resuming execution with). Get a pointer to the location
1608 * in the CPU state struct where the SP we need is currently being
1609 * stored; we will use and modify it in place.
1610 * We use this limited C variable scope so we don't accidentally
1611 * use 'frame_sp_p' after we do something that makes it invalid.
1613 bool spsel
= env
->v7m
.control
[return_to_secure
] & R_V7M_CONTROL_SPSEL_MASK
;
1614 uint32_t *frame_sp_p
= get_v7m_sp_ptr(env
,
1618 uint32_t frameptr
= *frame_sp_p
;
1621 bool return_to_priv
= return_to_handler
||
1622 !(env
->v7m
.control
[return_to_secure
] & R_V7M_CONTROL_NPRIV_MASK
);
1624 mmu_idx
= arm_v7m_mmu_idx_for_secstate_and_priv(env
, return_to_secure
,
1627 if (!QEMU_IS_ALIGNED(frameptr
, 8) &&
1628 arm_feature(env
, ARM_FEATURE_V8
)) {
1629 qemu_log_mask(LOG_GUEST_ERROR
,
1630 "M profile exception return with non-8-aligned SP "
1631 "for destination state is UNPREDICTABLE\n");
1634 /* Do we need to pop callee-saved registers? */
1635 if (return_to_secure
&&
1636 ((excret
& R_V7M_EXCRET_ES_MASK
) == 0 ||
1637 (excret
& R_V7M_EXCRET_DCRS_MASK
) == 0)) {
1638 uint32_t actual_sig
;
1640 pop_ok
= v7m_stack_read(cpu
, &actual_sig
, frameptr
, mmu_idx
);
1642 if (pop_ok
&& v7m_integrity_sig(env
, excret
) != actual_sig
) {
1643 /* Take a SecureFault on the current stack */
1644 env
->v7m
.sfsr
|= R_V7M_SFSR_INVIS_MASK
;
1645 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
1646 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
1647 "stackframe: failed exception return integrity "
1648 "signature check\n");
1649 v7m_exception_taken(cpu
, excret
, true, false);
1654 v7m_stack_read(cpu
, &env
->regs
[4], frameptr
+ 0x8, mmu_idx
) &&
1655 v7m_stack_read(cpu
, &env
->regs
[5], frameptr
+ 0xc, mmu_idx
) &&
1656 v7m_stack_read(cpu
, &env
->regs
[6], frameptr
+ 0x10, mmu_idx
) &&
1657 v7m_stack_read(cpu
, &env
->regs
[7], frameptr
+ 0x14, mmu_idx
) &&
1658 v7m_stack_read(cpu
, &env
->regs
[8], frameptr
+ 0x18, mmu_idx
) &&
1659 v7m_stack_read(cpu
, &env
->regs
[9], frameptr
+ 0x1c, mmu_idx
) &&
1660 v7m_stack_read(cpu
, &env
->regs
[10], frameptr
+ 0x20, mmu_idx
) &&
1661 v7m_stack_read(cpu
, &env
->regs
[11], frameptr
+ 0x24, mmu_idx
);
1668 v7m_stack_read(cpu
, &env
->regs
[0], frameptr
, mmu_idx
) &&
1669 v7m_stack_read(cpu
, &env
->regs
[1], frameptr
+ 0x4, mmu_idx
) &&
1670 v7m_stack_read(cpu
, &env
->regs
[2], frameptr
+ 0x8, mmu_idx
) &&
1671 v7m_stack_read(cpu
, &env
->regs
[3], frameptr
+ 0xc, mmu_idx
) &&
1672 v7m_stack_read(cpu
, &env
->regs
[12], frameptr
+ 0x10, mmu_idx
) &&
1673 v7m_stack_read(cpu
, &env
->regs
[14], frameptr
+ 0x14, mmu_idx
) &&
1674 v7m_stack_read(cpu
, &env
->regs
[15], frameptr
+ 0x18, mmu_idx
) &&
1675 v7m_stack_read(cpu
, &xpsr
, frameptr
+ 0x1c, mmu_idx
);
1679 * v7m_stack_read() pended a fault, so take it (as a tail
1680 * chained exception on the same stack frame)
1682 qemu_log_mask(CPU_LOG_INT
, "...derived exception on unstacking\n");
1683 v7m_exception_taken(cpu
, excret
, true, false);
1688 * Returning from an exception with a PC with bit 0 set is defined
1689 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1690 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1691 * the lsbit, and there are several RTOSes out there which incorrectly
1692 * assume the r15 in the stack frame should be a Thumb-style "lsbit
1693 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1694 * complain about the badly behaved guest.
1696 if (env
->regs
[15] & 1) {
1697 env
->regs
[15] &= ~1U;
1698 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
1699 qemu_log_mask(LOG_GUEST_ERROR
,
1700 "M profile return from interrupt with misaligned "
1701 "PC is UNPREDICTABLE on v7M\n");
1705 if (arm_feature(env
, ARM_FEATURE_V8
)) {
1707 * For v8M we have to check whether the xPSR exception field
1708 * matches the EXCRET value for return to handler/thread
1709 * before we commit to changing the SP and xPSR.
1711 bool will_be_handler
= (xpsr
& XPSR_EXCP
) != 0;
1712 if (return_to_handler
!= will_be_handler
) {
1714 * Take an INVPC UsageFault on the current stack.
1715 * By this point we will have switched to the security state
1716 * for the background state, so this UsageFault will target
1719 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
1721 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
1722 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
1723 "stackframe: failed exception return integrity "
1725 v7m_exception_taken(cpu
, excret
, true, false);
1731 /* FP present and we need to handle it */
1732 if (!return_to_secure
&&
1733 (env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_LSPACT_MASK
)) {
1734 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
1735 env
->v7m
.sfsr
|= R_V7M_SFSR_LSERR_MASK
;
1736 qemu_log_mask(CPU_LOG_INT
,
1737 "...taking SecureFault on existing stackframe: "
1738 "Secure LSPACT set but exception return is "
1739 "not to secure state\n");
1740 v7m_exception_taken(cpu
, excret
, true, false);
1744 restore_s16_s31
= return_to_secure
&&
1745 (env
->v7m
.fpccr
[M_REG_S
] & R_V7M_FPCCR_TS_MASK
);
1747 if (env
->v7m
.fpccr
[return_to_secure
] & R_V7M_FPCCR_LSPACT_MASK
) {
1748 /* State in FPU is still valid, just clear LSPACT */
1749 env
->v7m
.fpccr
[return_to_secure
] &= ~R_V7M_FPCCR_LSPACT_MASK
;
1753 bool cpacr_pass
, nsacr_pass
;
1755 cpacr_pass
= v7m_cpacr_pass(env
, return_to_secure
,
1757 nsacr_pass
= return_to_secure
||
1758 extract32(env
->v7m
.nsacr
, 10, 1);
1761 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
1763 env
->v7m
.cfsr
[return_to_secure
] |= R_V7M_CFSR_NOCP_MASK
;
1764 qemu_log_mask(CPU_LOG_INT
,
1765 "...taking UsageFault on existing "
1766 "stackframe: CPACR.CP10 prevents unstacking "
1768 v7m_exception_taken(cpu
, excret
, true, false);
1770 } else if (!nsacr_pass
) {
1771 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, true);
1772 env
->v7m
.cfsr
[M_REG_S
] |= R_V7M_CFSR_INVPC_MASK
;
1773 qemu_log_mask(CPU_LOG_INT
,
1774 "...taking Secure UsageFault on existing "
1775 "stackframe: NSACR.CP10 prevents unstacking "
1777 v7m_exception_taken(cpu
, excret
, true, false);
1781 for (i
= 0; i
< (restore_s16_s31
? 32 : 16); i
+= 2) {
1784 uint32_t faddr
= frameptr
+ 0x20 + 4 * i
;
1787 faddr
+= 8; /* Skip the slot for the FPSCR and VPR */
1791 v7m_stack_read(cpu
, &slo
, faddr
, mmu_idx
) &&
1792 v7m_stack_read(cpu
, &shi
, faddr
+ 4, mmu_idx
);
1798 dn
= (uint64_t)shi
<< 32 | slo
;
1799 *aa32_vfp_dreg(env
, i
/ 2) = dn
;
1802 v7m_stack_read(cpu
, &fpscr
, frameptr
+ 0x60, mmu_idx
);
1804 vfp_set_fpscr(env
, fpscr
);
1806 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1808 v7m_stack_read(cpu
, &env
->v7m
.vpr
,
1809 frameptr
+ 0x64, mmu_idx
);
1813 * These regs are 0 if security extension present;
1814 * otherwise merely UNKNOWN. We zero always.
1816 for (i
= 0; i
< (restore_s16_s31
? 32 : 16); i
+= 2) {
1817 *aa32_vfp_dreg(env
, i
/ 2) = 0;
1819 vfp_set_fpscr(env
, 0);
1820 if (cpu_isar_feature(aa32_mve
, cpu
)) {
1826 env
->v7m
.control
[M_REG_S
] = FIELD_DP32(env
->v7m
.control
[M_REG_S
],
1827 V7M_CONTROL
, FPCA
, !ftype
);
1829 /* Commit to consuming the stack frame */
1833 if (restore_s16_s31
) {
1838 * Undo stack alignment (the SPREALIGN bit indicates that the original
1839 * pre-exception SP was not 8-aligned and we added a padding word to
1840 * align it, so we undo this by ORing in the bit that increases it
1841 * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1842 * would work too but a logical OR is how the pseudocode specifies it.)
1844 if (xpsr
& XPSR_SPREALIGN
) {
1847 *frame_sp_p
= frameptr
;
1850 xpsr_mask
= ~(XPSR_SPREALIGN
| XPSR_SFPA
);
1851 if (!arm_feature(env
, ARM_FEATURE_THUMB_DSP
)) {
1852 xpsr_mask
&= ~XPSR_GE
;
1854 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1855 xpsr_write(env
, xpsr
, xpsr_mask
);
1857 if (env
->v7m
.secure
) {
1858 bool sfpa
= xpsr
& XPSR_SFPA
;
1860 env
->v7m
.control
[M_REG_S
] = FIELD_DP32(env
->v7m
.control
[M_REG_S
],
1861 V7M_CONTROL
, SFPA
, sfpa
);
1865 * The restored xPSR exception field will be zero if we're
1866 * resuming in Thread mode. If that doesn't match what the
1867 * exception return excret specified then this is a UsageFault.
1868 * v7M requires we make this check here; v8M did it earlier.
1870 if (return_to_handler
!= arm_v7m_is_handler_mode(env
)) {
1872 * Take an INVPC UsageFault by pushing the stack again;
1873 * we know we're v7M so this is never a Secure UsageFault.
1875 bool ignore_stackfaults
;
1877 assert(!arm_feature(env
, ARM_FEATURE_V8
));
1878 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, false);
1879 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
1880 ignore_stackfaults
= v7m_push_stack(cpu
);
1881 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on new stackframe: "
1882 "failed exception return integrity check\n");
1883 v7m_exception_taken(cpu
, excret
, false, ignore_stackfaults
);
1887 /* Otherwise, we have a successful exception exit. */
1888 arm_clear_exclusive(env
);
1889 arm_rebuild_hflags(env
);
1890 qemu_log_mask(CPU_LOG_INT
, "...successful exception return\n");
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /*
     * v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        MemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /*
         * These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
        newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
        newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;
    arm_rebuild_hflags(env);

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
1968 static bool v7m_read_half_insn(ARMCPU
*cpu
, ARMMMUIdx mmu_idx
, bool secure
,
1969 uint32_t addr
, uint16_t *insn
)
1972 * Load a 16-bit portion of a v7M instruction, returning true on success,
1973 * or false on failure (in which case we will have pended the appropriate
1975 * We need to do the instruction fetch's MPU and SAU checks
1976 * like this because there is no MMU index that would allow
1977 * doing the load with a single function call. Instead we must
1978 * first check that the security attributes permit the load
1979 * and that they don't mismatch on the two halves of the instruction,
1980 * and then we do the load as a secure load (ie using the security
1981 * attributes of the address, not the CPU, as architecturally required).
1983 CPUState
*cs
= CPU(cpu
);
1984 CPUARMState
*env
= &cpu
->env
;
1985 V8M_SAttributes sattrs
= {};
1986 GetPhysAddrResult res
= {};
1987 ARMMMUFaultInfo fi
= {};
1990 v8m_security_lookup(env
, addr
, MMU_INST_FETCH
, mmu_idx
, secure
, &sattrs
);
1991 if (!sattrs
.nsc
|| sattrs
.ns
) {
1993 * This must be the second half of the insn, and it straddles a
1994 * region boundary with the second half not being S&NSC.
1996 env
->v7m
.sfsr
|= R_V7M_SFSR_INVEP_MASK
;
1997 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
1998 qemu_log_mask(CPU_LOG_INT
,
1999 "...really SecureFault with SFSR.INVEP\n");
2002 if (get_phys_addr(env
, addr
, MMU_INST_FETCH
, mmu_idx
, &res
, &fi
)) {
2003 /* the MPU lookup failed */
2004 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_IACCVIOL_MASK
;
2005 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_MEM
, env
->v7m
.secure
);
2006 qemu_log_mask(CPU_LOG_INT
, "...really MemManage with CFSR.IACCVIOL\n");
2009 *insn
= address_space_lduw_le(arm_addressspace(cs
, res
.f
.attrs
),
2010 res
.f
.phys_addr
, res
.f
.attrs
, &txres
);
2011 if (txres
!= MEMTX_OK
) {
2012 env
->v7m
.cfsr
[M_REG_NS
] |= R_V7M_CFSR_IBUSERR_MASK
;
2013 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_BUS
, false);
2014 qemu_log_mask(CPU_LOG_INT
, "...really BusFault with CFSR.IBUSERR\n");
static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                   uint32_t addr, uint32_t *spdata)
{
    /*
     * Read a word of data from the stack for the SG instruction,
     * writing the value into *spdata. If the load succeeds, return
     * true; otherwise pend an appropriate exception and return false.
     * (We can't use data load helpers here that throw an exception
     * because of the context we're called in, which is halfway through
     * arm_v7m_cpu_do_interrupt().)
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault during stack word read\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault during stack word read\n");
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
                R_V7M_CFSR_MMARVALID_MASK;
            env->v7m.mmfar[M_REG_S] = addr;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
        }
        return false;
    }
    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT,
                      "...BusFault during stack word read\n");
        env->v7m.cfsr[M_REG_NS] |=
            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
        env->v7m.bfar = addr;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        return false;
    }

    *spdata = value;
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /*
     * Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /*
     * We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /*
     * OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);

    if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
        !arm_v7m_is_handler_mode(env)) {
        /*
         * v8.1M exception stack frame integrity check. Note that we
         * must perform the memory access even if CCR_S.TRD is zero
         * and we aren't going to check what the data loaded is.
         */
        uint32_t spdata, sp;

        /*
         * We know we are currently NS, so the S stack pointers must be
         * in other_ss_{psp,msp}, not in regs[13]/other_sp.
         */
        sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
        if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
            /* Stack access failed and an exception has been pended */
            return false;
        }

        if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
            if (((spdata & ~1) == 0xfefa125a) ||
                !(env->v7m.control[M_REG_S] & 1)) {
                goto gen_invep;
            }
        }
    }

    env->regs[14] &= ~1;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn itself */
    arm_rebuild_hflags(env);
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs);

    /*
     * For exceptions we just mark as pending on the NVIC, and let that
     * handle it.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
    {
        /*
         * NOCP might be directed to something other than the current
         * security state if this fault is because of NSACR; we indicate
         * the target security state using exception.target_el.
         */
        int target_secstate;

        if (env->exception.target_el == 3) {
            target_secstate = M_REG_S;
        } else {
            target_secstate = env->v7m.secure;
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
        break;
    }
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_LSERR:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
        break;
    case EXCP_UNALIGNED:
        /* Unaligned faults reported by M-profile aware code */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
        break;
    case EXCP_DIVBYZERO:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         *
         * Log the exception.vaddress now regardless of subtype, because
         * logging below only logs it when it goes into a guest visible
         * register.
         */
        qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /*
             * Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /*
             * Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        case 0x1: /* Alignment fault reported by generic code */
            qemu_log_mask(CPU_LOG_INT,
                          "...really UsageFault with UFSR.UNALIGNED\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            break;
        default:
            /*
             * All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
#ifdef CONFIG_TCG
        do_common_semihosting(cs);
#else
        g_assert_not_reached();
#endif
        env->regs[15] += env->thumb ? 2 : 4;
        return;
    case EXCP_BKPT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    case EXCP_LAZYFP:
        /*
         * We already pended the specific exception in the NVIC in the
         * v7m_preserve_fp_state() helper function.
         */
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK;
        /*
         * The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        lr |= R_V7M_EXCRET_FTYPE_MASK;
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
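/*
 * Worked example (illustrative, assuming the usual EXCRET bit layout
 * with MODE at bit 3, FTYPE bit 4, DCRS bit 5 and S bit 6): a v8M CPU
 * taking an exception from Secure Thread mode with no active FP
 * context builds lr = RES1 | DCRS | S | FTYPE | MODE here, and ES
 * (and SPSEL) are then filled in by v7m_exception_taken(), yielding
 * one of the familiar 0xfffffffX magic values.
 */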
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, el);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, env->v7m.secure);
    case 0x94: /* CONTROL_NS */
        /*
         * We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS] |
            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
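/*
 * Usage sketch: the SYSm field of the MRS instruction arrives here as
 * 'reg', so "MRS r0, PRIMASK" reads case 16 above, and Secure code
 * reading the NS main stack pointer with "MRS r0, MSP_NS" hits
 * case 0x88.
 */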
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /*
     * We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);
    int cur_el = arm_current_el(env);

    if (cur_el == 0 && reg > 7 && reg != 20) {
        /*
         * only xPSR sub-fields and CONTROL.SFPA may be written by
         * unprivileged code
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val & ~3;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val & ~3;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            /*
             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
             * RES0 if the FPU is not present, and is stored in the S bank
             */
            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
                extract32(env->v7m.nsacr, 10, 1)) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            val &= ~0x3;

            if (val < limit) {
                raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val & ~3;
        } else {
            env->regs[13] = val & ~3;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val & ~3;
        } else {
            env->v7m.other_sp = val & ~3;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /*
         * Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         * All these bits are writes-ignored from non-privileged code,
         * except for SFPA.
         */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
    }
}
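/*
 * Usage sketch: the translator passes bits [11:0] of the MSR insn as
 * maskreg, so "MSR PRIMASK, r0" arrives with reg == 16 and a mask
 * nibble in bits [11:8] that only matters for the xPSR sub-field
 * cases handled via v7m_msr_xpsr() above.
 */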
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    ARMMMUIdx mmu_idx;
    int mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /*
     * Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /*
     * MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        GetPhysAddrResult res = {};
        ARMMMUFaultInfo fi = {};

        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
                          &res, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = res.f.prot & PAGE_READ;
        rw = res.f.prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targetsec, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
#endif /* !CONFIG_USER_ONLY */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
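/*
 * Example of the composition (illustrative): handler-mode code in the
 * Secure state with no negative-priority execution active gets
 * ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S, while
 * unprivileged Non-Secure Thread mode is plain ARM_MMU_IDX_M.
 */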