/*
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#include "hw/intc/armv7m_nvic.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads others as zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    return ARMMMUIdx_MUser;
}

#else /* !CONFIG_USER_ONLY */

static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                                     bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                       bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

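/*
 * Worked example (illustrative): a Secure, privileged context that has
 * not requested negative priority composes to
 * ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S, which is
 * ARMMMUIdx_MSPriv in QEMU's naming; Handler mode with FAULTMASK raised
 * would additionally OR in ARM_MMU_IDX_M_NEGPRI.
 */
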
/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

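/*
 * In pseudocode terms (an interpretation, not spec text): STACK_NORMAL
 * corresponds to the AccType_STACK accesses made on exception entry,
 * STACK_LAZYFP to the AccType_LAZYFP accesses made when deferred FP
 * state is finally written out, and STACK_IGNFAULTS is used for the
 * callee-saves push once a derived exception has already been
 * generated, where faults update the FSRs but must not be pended.
 */
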
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
                         value, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

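/*
 * Callers chain these helpers with && so the first failing access
 * short-circuits the rest of the frame, e.g.:
 *   stacked_ok = stacked_ok &&
 *       v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL);
 * On failure only the fault status registers (and possibly a pended
 * derived exception) are updated; SP handling is left to the caller.
 */
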
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

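/*
 * Illustrative trigger for the helper above (guest code, not QEMU API):
 *     vldr s0, [r0]    @ first FP insn while FPCCR.LSPACT is set
 * The translator routes the first FP access through
 * v7m_preserve_fp_state() so the lazily-reserved frame at FPCAR is
 * written out before the new FP instruction executes.
 */
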
/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

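/*
 * SP banking summary, as implemented above: env->regs[13] always holds
 * the SP selected by (secure, SPSEL); env->v7m.other_sp holds the other
 * SP of the current security state; other_ss_msp/other_ss_psp hold both
 * stack pointers of the inactive security state.
 */
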
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

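/*
 * For reference: EXC_RETURN_MIN_MAGIC is 0xff000000 and
 * FNC_RETURN_MIN_MAGIC is 0xfefffffe, so when the Security Extension is
 * present the single 'dest >= min_magic' test above catches both the
 * FNC_RETURN and EXC_RETURN ranges.
 */
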
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

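/*
 * The LR value 0xfeffffff written above is FNC_RETURN_MIN_MAGIC | 1;
 * a later BX to it is trapped as an internal exception and handled by
 * do_v7m_function_return() rather than being executed as a real branch
 * target.
 */
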
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targets_secure, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

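/*
 * Example values: with an FP frame present (FType == 0) the signature
 * is 0xfefa125a; with no FP frame, or no FPU, bit 0 is set and it is
 * 0xfefa125b.
 */
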
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
                                        lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

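/*
 * Callee-saves frame layout written above (0x28 bytes):
 *   +0x00 integrity signature   +0x08 r4    +0x0c r5    +0x10 r6
 *   +0x14 r7    +0x18 r8    +0x1c r9    +0x20 r10   +0x24 r11
 * (the word at +0x04 is reserved and is not written here).
 */
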
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

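/*
 * Note that both recursive calls above pass dotailchain = true and
 * ignore_stackfaults = true: a derived exception is entered as a tail
 * chain, and no further derived exceptions are generated for its own
 * stacking.
 */
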
static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    NVICState *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

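/*
 * The *RDY bits cached above record whether each fault handler could
 * preempt at the point the FP frame was reserved; they are consulted
 * when the lazy stacking is eventually performed (for example HFRDY
 * feeds 'negpri' in v7m_preserve_fp_state() above).
 */
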
void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

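/*
 * Guest-side usage sketch (illustrative): Secure code brackets a call
 * into Non-secure state with
 *     vlstm r0    @ save, or lazily reserve, FP state at [r0]
 *     blxns r1    @ call the NS function
 *     vlldm r0    @ restore FP state (or just clear LSPACT)
 * which is the pairing implemented by this helper and v7m_vlldm below.
 */
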
void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

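/*
 * Basic exception frame layout pushed above:
 *   +0x00 r0   +0x04 r1   +0x08 r2   +0x0c r3
 *   +0x10 r12  +0x14 lr   +0x18 return address   +0x1c xPSR
 * With an FP frame, s0..s15 follow at +0x20, FPSCR at +0x60 and (with
 * MVE) VPR at +0x64; the extended Secure frame adds s16..s31 above that.
 */
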
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /*
         * Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /*
         * We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /*
             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /*
             * We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    /*
     * Clear scratch FP values left in caller saved registers; this
     * must happen before any kind of tail chaining.
     */
    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                          "stackframe: error during lazy state deactivation\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        } else {
            if (arm_feature(env, ARM_FEATURE_V8_1M)) {
                /* v8.1M adds this NOCP check */
                bool nsacr_pass = exc_secure ||
                    extract32(env->v7m.nsacr, 10, 1);
                bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
                if (!nsacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                        "stackframe: NSACR prevents clearing FPU registers\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                } else if (!cpacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            exc_secure);
                    env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                        "stackframe: CPACR prevents clearing FPU registers\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                }
            }
            /* Clear s0..s15, FPSCR and VPR */
            int i;

            for (i = 0; i < 16; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    }

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /*
         * Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /*
     * Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * our current execution priority is already the execution priority we are
     * returning to -- none of the state we would unstack or set based on
     * the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /*
         * The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
        uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
                                                  !return_to_handler, spsel);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /*
             * v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /*
         * Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /*
                 * Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        if (!ftype) {
            /* FP present and we need to handle it */
            if (!return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...taking SecureFault on existing stackframe: "
                              "Secure LSPACT set but exception return is "
                              "not to secure state\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            restore_s16_s31 = return_to_secure &&
                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);

            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                /* State in FPU is still valid, just clear LSPACT */
                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
            } else {
                int i;
                uint32_t fpscr;
                bool cpacr_pass, nsacr_pass;

                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
                                            return_to_priv);
                nsacr_pass = return_to_secure ||
                    extract32(env->v7m.nsacr, 10, 1);

                if (!cpacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            return_to_secure);
                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking UsageFault on existing "
                                  "stackframe: CPACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                } else if (!nsacr_pass) {
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...taking Secure UsageFault on existing "
                                  "stackframe: NSACR.CP10 prevents unstacking "
                                  "FP regs\n");
                    v7m_exception_taken(cpu, excret, true, false);
                    return;
                }

                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                    uint32_t slo, shi;
                    uint64_t dn;
                    uint32_t faddr = frameptr + 0x20 + 4 * i;

                    if (i >= 16) {
                        faddr += 8; /* Skip the slot for the FPSCR and VPR */
                    }

                    pop_ok = pop_ok &&
                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);

                    if (!pop_ok) {
                        break;
                    }

                    dn = (uint64_t)shi << 32 | slo;
                    *aa32_vfp_dreg(env, i / 2) = dn;
                }
                pop_ok = pop_ok &&
                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
                if (pop_ok) {
                    vfp_set_fpscr(env, fpscr);
                }
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    pop_ok = pop_ok &&
                        v7m_stack_read(cpu, &env->v7m.vpr,
                                       frameptr + 0x64, mmu_idx);
                }
                if (!pop_ok) {
                    /*
                     * These regs are 0 if security extension present;
                     * otherwise merely UNKNOWN. We zero always.
                     */
                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            }
        }
        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, FPCA, !ftype);

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        if (!ftype) {
            frameptr += 0x48;
            if (restore_s16_s31) {
                frameptr += 0x40;
            }
        }
        /*
         * Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }

    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
        xpsr_mask &= ~XPSR_GE;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, xpsr_mask);

    if (env->v7m.secure) {
        bool sfpa = xpsr & XPSR_SFPA;

        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
                                               V7M_CONTROL, SFPA, sfpa);
    }

    /*
     * The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /*
         * Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    arm_rebuild_hflags(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}

static bool do_v7m_function_return(ARMCPU *cpu)
{
    /*
     * v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        MemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /*
         * These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
        newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
        newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;
    arm_rebuild_hflags(env);

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
                               uint32_t addr, uint16_t *insn)
{
    /*
     * Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /*
         * This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
                                  res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                   uint32_t addr, uint32_t *spdata)
{
    /*
     * Read a word of data from the stack for the SG instruction,
     * writing the value into *spdata. If the load succeeds, return
     * true; otherwise pend an appropriate exception and return false.
     * (We can't use data load helpers here that throw an exception
     * because of the context we're called in, which is halfway through
     * arm_v7m_cpu_do_interrupt().)
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault during stack word read\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault during stack word read\n");
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
                R_V7M_CFSR_MMARVALID_MASK;
            env->v7m.mmfar[M_REG_S] = addr;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
        }
        return false;
    }
    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT,
                      "...BusFault during stack word read\n");
        env->v7m.cfsr[M_REG_NS] |=
            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
        env->v7m.bfar = addr;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        return false;
    }

    *spdata = value;
    return true;
}
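/*
 * Note: the constant 0xfefa125a tested in v7m_handle_execute_nsc() below
 * is the v8.1M exception stack frame integrity signature; bit 0 of the
 * stored signature encodes the FP frame type, hence the '& ~1' there.
 */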
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /*
     * Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /*
     * We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /*
         * Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /*
     * OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);

    if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
        !arm_v7m_is_handler_mode(env)) {
        /*
         * v8.1M exception stack frame integrity check. Note that we
         * must perform the memory access even if CCR_S.TRD is zero
         * and we aren't going to check what the data loaded is.
         */
        uint32_t spdata, sp;

        /*
         * We know we are currently NS, so the S stack pointers must be
         * in other_ss_{psp,msp}, not in regs[13]/other_sp.
         */
        sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
        if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
            /* Stack access failed and an exception has been pended */
            return false;
        }

        if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
            if (((spdata & ~1) == 0xfefa125a) ||
                !(env->v7m.control[M_REG_S] & 1)) {
                goto gen_invep;
            }
        }
    }

    env->regs[14] &= ~1;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn itself */
    arm_rebuild_hflags(env);
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs);

    /*
     * For exceptions we just mark as pending on the NVIC, and let that
     * handle it.
     */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
    {
        /*
         * NOCP might be directed to something other than the current
         * security state if this fault is because of NSACR; we indicate
         * the target security state using exception.target_el.
         */
        int target_secstate;

        if (env->exception.target_el == 3) {
            target_secstate = M_REG_S;
        } else {
            target_secstate = env->v7m.secure;
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
        break;
    }
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_LSERR:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
        break;
    case EXCP_UNALIGNED:
        /* Unaligned faults reported by M-profile aware code */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
        break;
    case EXCP_DIVBYZERO:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         *
         * Log the exception.vaddress now regardless of subtype, because
         * logging below only logs it when it goes into a guest visible
         * register.
         */
        qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /*
             * Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /*
             * Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        case 0x1: /* Alignment fault reported by generic code */
            qemu_log_mask(CPU_LOG_INT,
                          "...really UsageFault with UFSR.UNALIGNED\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            break;
        default:
            /*
             * All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
#ifdef CONFIG_TCG
        do_common_semihosting(cs);
#else
        g_assert_not_reached();
#endif
        env->regs[15] += env->thumb ? 2 : 4;
        return;
    case EXCP_BKPT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    case EXCP_LAZYFP:
        /*
         * We already pended the specific exception in the NVIC in the
         * v7m_preserve_fp_state() helper function.
         */
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK;
        /*
         * The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        lr |= R_V7M_EXCRET_FTYPE_MASK;
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
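/*
 * For reference, the EXC_RETURN (magic LR) bits assembled above are, per
 * the v8M ARM ARM: ES [0], SPSEL [2], MODE [3], FTYPE [4], DCRS [5] and
 * S [6]; the RES1 field supplies the high bits that put the value into
 * the magic range recognised at exception-return time.
 */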
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, el);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, env->v7m.secure);
    case 0x94: /* CONTROL_NS */
        /*
         * We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS] |
            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
                goto bad_reg;
            }
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
                goto bad_reg;
            }
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                      " register %d\n", reg);
        return 0;
    }
}
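/*
 * SYSm encoding summary for the MRS/MSR helpers: 0-7 are the xPSR views,
 * 8/9 MSP/PSP, 10/11 MSPLIM/PSPLIM, 16-19 PRIMASK/BASEPRI/BASEPRI_MAX/
 * FAULTMASK and 20 CONTROL; setting bit 7 (0x88, 0x89, ...) selects the
 * Non-secure alias of the banked register, accessible only from Secure
 * state.
 */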
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /*
     * We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);
    int cur_el = arm_current_el(env);

    if (cur_el == 0 && reg > 7 && reg != 20) {
        /*
         * only xPSR sub-fields and CONTROL.SFPA may be written by
         * unprivileged code
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val & ~3;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val & ~3;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
                goto bad_reg;
            }
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
                goto bad_reg;
            }
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            /*
             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
             * RES0 if the FPU is not present, and is stored in the S bank
             */
            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
                extract32(env->v7m.nsacr, 10, 1)) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /*
             * This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            val &= ~0x3;

            if (val < limit) {
                raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val & ~3;
        } else {
            env->regs[13] = val & ~3;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val & ~3;
        } else {
            env->v7m.other_sp = val & ~3;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /*
         * Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         * All these bits are writes-ignored from non-privileged code,
         * except for SFPA.
         */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                      " register %d\n", reg);
    }
}
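/*
 * Note on BASEPRI_MAX above: the write is applied only when it would
 * raise the priority, ie when the new value is nonzero and either lower
 * than the current BASEPRI or BASEPRI is currently 0 (disabled). This
 * conditional update is what distinguishes MSR BASEPRI_MAX from a plain
 * MSR BASEPRI.
 */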
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /*
     * Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /*
     * MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        GetPhysAddrResult res = {};
        ARMMMUFaultInfo fi = {};

        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
                          &res, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = res.f.prot & PAGE_READ;
        rw = res.f.prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targetsec, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

#endif /* !CONFIG_USER_ONLY */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                             bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
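/*
 * Between them, regs[13], v7m.other_sp, v7m.other_ss_msp and
 * v7m.other_ss_psp hold all four architectural stack pointers
 * (MSP_S, PSP_S, MSP_NS, PSP_NS); which slot holds which depends on
 * the current security state and on which SP is currently live in
 * regs[13].
 */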