FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
+FIELD(V7M_CONTROL, SFPA, 3, 1)
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
+/* Minimum value which is a magic number for exception return */
+#define EXC_RETURN_MIN_MAGIC 0xff000000
+/* Minimum value which is a magic number for function or exception return
+ * when using the v8M security extension
+ */
+#define FNC_RETURN_MIN_MAGIC 0xfefffffe
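+
+/* Illustrative only, not part of this patch: code consuming these
+ * constants classifies a value about to be loaded into the PC roughly as
+ *
+ *     if (addr >= EXC_RETURN_MIN_MAGIC) {
+ *         ... exception return ...
+ *     } else if (addr >= FNC_RETURN_MIN_MAGIC) {
+ *         ... v8M FNC_RETURN function return ...
+ *     }
+ */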
+
+/* We use a few fake FSR values for internal purposes in M profile.
+ * M profile cores don't have A/R format FSRs, but currently our
+ * get_phys_addr() code assumes A/R profile and reports failures via
+ * an A/R format FSR value. We then translate that into the proper
+ * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
+ * Mostly the FSR values we use for this are those defined for v7PMSA,
+ * since we share some of that codepath. A few kinds of fault are
+ * only for M profile and have no A/R equivalent, though, so we have
+ * to pick a value from the reserved range (which we never otherwise
+ * generate) to use for these.
+ * These values will never be visible to the guest.
+ */
+#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
+#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
+
+/**
+ * raise_exception: Raise the specified exception.
+ * Raise a guest exception with the specified value, syndrome register
+ * and target exception level. This should be called from helper functions,
+ * and never returns because we will longjump back up to the CPU main loop.
+ */
+void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el);
+
+/*
+ * Similarly, but also use the host return address @ra to unwind and
+ * restore the CPU state before raising the exception.
+ */
+void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el,
+ uintptr_t ra);
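+
+/* Illustrative call from a TCG helper, where GETPC() supplies the host
+ * return address used for unwinding (names as used elsewhere in QEMU):
+ *
+ *     raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(),
+ *                        exception_target_el(env), GETPC());
+ */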
+
/*
* For AArch64, map a given EL to an index in the banked_spsr array.
* Note that this mapping and the AArch32 mapping defined in bank_number()
g_assert_not_reached();
}
-void switch_mode(CPUARMState *, int);
+/**
+ * r14_bank_number: Map CPU mode onto register bank for r14
+ *
+ * Given an AArch32 CPU mode, return the index into the saved register
+ * banks to use for the R14 (LR) in that mode. This is the same as
+ * bank_number(), except for the special case of Hyp mode, where
+ * R14 is shared with USR and SYS, unlike its R13 and SPSR.
+ * This should be used as the index into env->banked_r14[], and
+ * bank_number() used for the index into env->banked_r13[] and
+ * env->banked_spsr[].
+ */
+static inline int r14_bank_number(int mode)
+{
+ return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
+}
+
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
[4] = 44,
[5] = 48,
};
- unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);
+ unsigned int parange =
+ FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
/* id_aa64mmfr0 is a read-only register so values outside of the
* supported mappings can be considered an implementation error. */
EC_CP14DTTRAP = 0x06,
EC_ADVSIMDFPACCESSTRAP = 0x07,
EC_FPIDTRAP = 0x08,
+ EC_PACTRAP = 0x09,
EC_CP14RRTTRAP = 0x0c,
+ EC_BTITRAP = 0x0d,
EC_ILLEGALSTATE = 0x0e,
EC_AA32_SVC = 0x11,
EC_AA32_HVC = 0x12,
EC_AA64_HVC = 0x16,
EC_AA64_SMC = 0x17,
EC_SYSTEMREGISTERTRAP = 0x18,
+ EC_SVEACCESSTRAP = 0x19,
EC_INSNABORT = 0x20,
EC_INSNABORT_SAME_EL = 0x21,
EC_PCALIGNMENT = 0x22,
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
+static inline uint32_t syn_get_ec(uint32_t syn)
+{
+ return syn >> ARM_EL_EC_SHIFT;
+}
+
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
- * mode differs slightly, so if we ever implemented Hyp mode then the
- * syndrome value would need some massaging on exception entry.
- * (One example of this is that AArch64 defaults to IL bit set for
- * exceptions which don't specifically indicate information about the
- * trapping instruction, whereas AArch32 defaults to IL bit clear.)
+ * mode differs slightly, and we fix this up when populating HSR in
+ * arm_cpu_do_interrupt_aarch32_hyp().
+ * The exception is FP/SIMD access traps -- these report extra information
+ * when taking an exception to AArch32. For those we include the extra coproc
+ * and TA fields, and mask them out when taking the exception to AArch64.
*/
static inline uint32_t syn_uncategorized(void)
{
static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
+ /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | 0xa;
+}
+
+static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
+{
+ /* AArch32 SIMD trap: TA == 1 coproc == 0 */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
- | (cv << 24) | (cond << 20);
+ | (cv << 24) | (cond << 20) | (1 << 5);
+}
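+
+/* Worked example (illustrative): with ARM_EL_EC_SHIFT == 26 and IL at
+ * bit 25, syn_simd_access_trap(1, 0xe, false) encodes EC=0x07, IL=1,
+ * CV=1, COND=0xe, TA=1, i.e. 0x1fe00020; the FP variant instead sets
+ * coproc == 0xa, giving 0x1fe0000a.
+ */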
+
+static inline uint32_t syn_sve_access_trap(void)
+{
+ return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
+}
+
+static inline uint32_t syn_pactrap(void)
+{
+ return EC_PACTRAP << ARM_EL_EC_SHIFT;
+}
+
+static inline uint32_t syn_btitrap(int btype)
+{
+ return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
}
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
| ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}
-static inline uint32_t syn_data_abort_no_iss(int same_el,
+static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
int ea, int cm, int s1ptw,
int wnr, int fsc)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL
- | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+ | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
+ | (wnr << 6) | fsc;
}
static inline uint32_t syn_data_abort_with_iss(int same_el,
| ARM_EL_IL | 0x22;
}
-static inline uint32_t syn_wfx(int cv, int cond, int ti)
+static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
+ (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
(cv << 24) | (cond << 20) | ti;
}
/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
-#ifdef CONFIG_USER_ONLY
+#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
return false;
}
+static inline void arm_handle_psci_call(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
env->exclusive_addr = -1;
}
+/**
+ * ARMFaultType: type of an ARM MMU fault
+ * This corresponds to the v8A pseudocode's Fault enumeration,
+ * with extensions for QEMU internal conditions.
+ */
+typedef enum ARMFaultType {
+ ARMFault_None,
+ ARMFault_AccessFlag,
+ ARMFault_Alignment,
+ ARMFault_Background,
+ ARMFault_Domain,
+ ARMFault_Permission,
+ ARMFault_Translation,
+ ARMFault_AddressSize,
+ ARMFault_SyncExternal,
+ ARMFault_SyncExternalOnWalk,
+ ARMFault_SyncParity,
+ ARMFault_SyncParityOnWalk,
+ ARMFault_AsyncParity,
+ ARMFault_AsyncExternal,
+ ARMFault_Debug,
+ ARMFault_TLBConflict,
+ ARMFault_Lockdown,
+ ARMFault_Exclusive,
+ ARMFault_ICacheMaint,
+ ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
+ ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+} ARMFaultType;
+
/**
* ARMMMUFaultInfo: Information describing an ARM MMU Fault
+ * @type: Type of fault
+ * @level: Table walk level (for translation, access flag and permission faults)
+ * @domain: Domain of the fault address (for non-LPAE CPUs only)
* @s2addr: Address that caused a fault at stage 2
* @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
+ * @ea: True if we should set the EA (external abort type) bit in syndrome
*/
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
+ ARMFaultType type;
target_ulong s2addr;
+ int level;
+ int domain;
bool stage2;
bool s1ptw;
bool ea;
};
-/* Do a page table walk and add page to TLB if possible */
-bool arm_tlb_fill(CPUState *cpu, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- uint32_t *fsr, ARMMMUFaultInfo *fi);
+/**
+ * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
+ * Compare pseudocode EncodeSDFSC(), though unlike that function
+ * we set up a whole FSR-format code, including the domain field,
+ * and put the high bit of the FSC into bit 10.
+ */
+static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
+{
+ uint32_t fsc;
+
+ switch (fi->type) {
+ case ARMFault_None:
+ return 0;
+ case ARMFault_AccessFlag:
+ fsc = fi->level == 1 ? 0x3 : 0x6;
+ break;
+ case ARMFault_Alignment:
+ fsc = 0x1;
+ break;
+ case ARMFault_Permission:
+ fsc = fi->level == 1 ? 0xd : 0xf;
+ break;
+ case ARMFault_Domain:
+ fsc = fi->level == 1 ? 0x9 : 0xb;
+ break;
+ case ARMFault_Translation:
+ fsc = fi->level == 1 ? 0x5 : 0x7;
+ break;
+ case ARMFault_SyncExternal:
+ fsc = 0x8 | (fi->ea << 12);
+ break;
+ case ARMFault_SyncExternalOnWalk:
+ fsc = fi->level == 1 ? 0xc : 0xe;
+ fsc |= (fi->ea << 12);
+ break;
+ case ARMFault_SyncParity:
+ fsc = 0x409;
+ break;
+ case ARMFault_SyncParityOnWalk:
+ fsc = fi->level == 1 ? 0x40c : 0x40e;
+ break;
+ case ARMFault_AsyncParity:
+ fsc = 0x408;
+ break;
+ case ARMFault_AsyncExternal:
+ fsc = 0x406 | (fi->ea << 12);
+ break;
+ case ARMFault_Debug:
+ fsc = 0x2;
+ break;
+ case ARMFault_TLBConflict:
+ fsc = 0x400;
+ break;
+ case ARMFault_Lockdown:
+ fsc = 0x404;
+ break;
+ case ARMFault_Exclusive:
+ fsc = 0x405;
+ break;
+ case ARMFault_ICacheMaint:
+ fsc = 0x4;
+ break;
+ case ARMFault_Background:
+ fsc = 0x0;
+ break;
+ case ARMFault_QEMU_NSCExec:
+ fsc = M_FAKE_FSR_NSC_EXEC;
+ break;
+ case ARMFault_QEMU_SFault:
+ fsc = M_FAKE_FSR_SFAULT;
+ break;
+ default:
+ /* Other faults can't occur in a context that requires a
+ * short-format status code.
+ */
+ g_assert_not_reached();
+ }
+
+ fsc |= (fi->domain << 4);
+ return fsc;
+}
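+
+/* Worked example (illustrative): a level-2 translation fault in domain 5
+ * yields fsc = 0x7 | (5 << 4) = 0x57 in the short format.
+ */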
+
+/**
+ * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
+ * Compare pseudocode EncodeLDFSC(), though unlike that function
+ * we also fill in the LPAE bit (bit 9) of a DFSR-format value.
+ */
+static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
+{
+ uint32_t fsc;
+
+ switch (fi->type) {
+ case ARMFault_None:
+ return 0;
+ case ARMFault_AddressSize:
+ fsc = fi->level & 3;
+ break;
+ case ARMFault_AccessFlag:
+ fsc = (fi->level & 3) | (0x2 << 2);
+ break;
+ case ARMFault_Permission:
+ fsc = (fi->level & 3) | (0x3 << 2);
+ break;
+ case ARMFault_Translation:
+ fsc = (fi->level & 3) | (0x1 << 2);
+ break;
+ case ARMFault_SyncExternal:
+ fsc = 0x10 | (fi->ea << 12);
+ break;
+ case ARMFault_SyncExternalOnWalk:
+ fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
+ break;
+ case ARMFault_SyncParity:
+ fsc = 0x18;
+ break;
+ case ARMFault_SyncParityOnWalk:
+ fsc = (fi->level & 3) | (0x7 << 2);
+ break;
+ case ARMFault_AsyncParity:
+ fsc = 0x19;
+ break;
+ case ARMFault_AsyncExternal:
+ fsc = 0x11 | (fi->ea << 12);
+ break;
+ case ARMFault_Alignment:
+ fsc = 0x21;
+ break;
+ case ARMFault_Debug:
+ fsc = 0x22;
+ break;
+ case ARMFault_TLBConflict:
+ fsc = 0x30;
+ break;
+ case ARMFault_Lockdown:
+ fsc = 0x34;
+ break;
+ case ARMFault_Exclusive:
+ fsc = 0x35;
+ break;
+ default:
+ /* Other faults can't occur in a context that requires a
+ * long-format status code.
+ */
+ g_assert_not_reached();
+ }
+
+ fsc |= 1 << 9;
+ return fsc;
+}
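+
+/* Worked example (illustrative): a level-3 permission fault encodes as
+ * (3 & 3) | (0x3 << 2) = 0x0f, and with the LPAE bit becomes 0x20f.
+ */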
+
+static inline bool arm_extabort_type(MemTxResult result)
+{
+ /* The EA bit in syndromes and fault status registers is an
+ * IMPDEF classification of external aborts. ARM implementations
+ * usually use this to indicate AXI bus Decode error (0) or
+ * Slave error (1); in QEMU we follow that.
+ */
+ return result != MEMTX_DECODE_ERROR;
+}
+
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return mmu_idx | ARM_MMU_IDX_M;
+ } else {
+ return mmu_idx | ARM_MMU_IDX_A;
+ }
+}
+
+static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
+{
+ /* AArch64 is always a-profile. */
+ return mmu_idx | ARM_MMU_IDX_A;
+}
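+
+/* These conversions are intended to round-trip (illustrative assertion):
+ *
+ *     assert(arm_to_core_mmu_idx(core_to_arm_mmu_idx(env, idx)) == idx);
+ */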
+
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
+
+/*
+ * Return the MMU index for a v7M CPU with all relevant information
+ * manually specified.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+ bool secstate, bool priv, bool negpri);
+
+/*
+ * Return the MMU index for a v7M CPU in the specified security and
+ * privilege state.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+ bool secstate, bool priv);
+
+/* Return the MMU index for a v7M CPU in the specified security state */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr);
-/* Call the EL change hook if one has been registered */
+/* Call any registered EL change hooks */
+static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
+{
+ ARMELChangeHook *hook, *next;
+ QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
+ hook->hook(cpu, hook->opaque);
+ }
+}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
- if (cpu->el_change_hook) {
- cpu->el_change_hook(cpu, cpu->el_change_hook_opaque);
+ ARMELChangeHook *hook, *next;
+ QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
+ hook->hook(cpu, hook->opaque);
+ }
+}
+
+/* Return true if this address translation regime has two ranges. */
+static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ return true;
+ default:
+ return false;
}
}
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
- case ARMMMUIdx_S12NSE0:
- case ARMMMUIdx_S12NSE1:
- case ARMMMUIdx_S1NSE0:
- case ARMMMUIdx_S1NSE1:
- case ARMMMUIdx_S1E2:
- case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_Stage2:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
case ARMMMUIdx_MUser:
return false;
- case ARMMMUIdx_S1E3:
- case ARMMMUIdx_S1SE0:
- case ARMMMUIdx_S1SE1:
+ case ARMMMUIdx_SE3:
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
case ARMMMUIdx_MSUser:
return true;
default:
}
}
+static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE10_1_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Return the FSR value for a debug exception (watchpoint, hardware
+ * breakpoint or BKPT insn) targeting the exception level returned
+ * by arm_debug_target_el().
+ */
+static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
+{
+ ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
+ int target_el = arm_debug_target_el(env);
+ bool using_lpae = false;
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
+ using_lpae = true;
+ } else {
+ if (arm_feature(env, ARM_FEATURE_LPAE) &&
+ (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
+ using_lpae = true;
+ }
+ }
+
+ if (using_lpae) {
+ return arm_fi_to_lfsc(&fi);
+ } else {
+ return arm_fi_to_sfsc(&fi);
+ }
+}
+
+/**
+ * arm_num_brps: Return number of implemented breakpoints.
+ * Note that the ID register BRPS field is "number of bps - 1",
+ * and we return the actual number of breakpoints.
+ */
+static inline int arm_num_brps(ARMCPU *cpu)
+{
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
+ } else {
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
+ }
+}
+
+/**
+ * arm_num_wrps: Return number of implemented watchpoints.
+ * Note that the ID register WRPS field is "number of wps - 1",
+ * and we return the actual number of watchpoints.
+ */
+static inline int arm_num_wrps(ARMCPU *cpu)
+{
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
+ } else {
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
+ }
+}
+
+/**
+ * arm_num_ctx_cmps: Return number of implemented context comparators.
+ * Note that the ID register CTX_CMPS field is "number of cmps - 1",
+ * and we return the actual number of comparators.
+ */
+static inline int arm_num_ctx_cmps(ARMCPU *cpu)
+{
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
+ } else {
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
+ }
+}
+
+/**
+ * v7m_using_psp: Return true if using process stack pointer
+ * Return true if the CPU is currently using the process stack
+ * pointer, or false if it is using the main stack pointer.
+ */
+static inline bool v7m_using_psp(CPUARMState *env)
+{
+ /* Handler mode always uses the main stack; for thread mode
+ * the CONTROL.SPSEL bit determines the answer.
+ * Note that in v7M it is not possible to be in Handler mode with
+ * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
+ */
+ return !arm_v7m_is_handler_mode(env) &&
+ env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
+}
+
+/**
+ * v7m_sp_limit: Return SP limit for current CPU state
+ * Return the SP limit value for the current CPU security state
+ * and stack pointer.
+ */
+static inline uint32_t v7m_sp_limit(CPUARMState *env)
+{
+ if (v7m_using_psp(env)) {
+ return env->v7m.psplim[env->v7m.secure];
+ } else {
+ return env->v7m.msplim[env->v7m.secure];
+ }
+}
+
+/**
+ * v7m_cpacr_pass:
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static inline bool v7m_cpacr_pass(CPUARMState *env,
+ bool is_secure, bool is_priv)
+{
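+    /* This reads the CP10 field of CPACR (bits [21:20]). For the
+     * M-profile FPU, CP11 is expected to be programmed to the same
+     * value as CP10, so checking CP10 alone suffices here.
+     */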
+ switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+ case 0:
+    case 2: /* UNPREDICTABLE: we treat it like 0 */
+ return false;
+ case 1:
+ return is_priv;
+ case 3:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
+ * aarch32_mode_name(): Return name of the AArch32 CPU mode
+ * @psr: Program Status Register indicating CPU mode
+ *
+ * Returns, for debug logging purposes, a printable representation
+ * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
+ * the low bits of the specified PSR.
+ */
+static inline const char *aarch32_mode_name(uint32_t psr)
+{
+ static const char cpu_mode_names[16][4] = {
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
+ };
+
+ return cpu_mode_names[psr & 0xf];
+}
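+
+/* Typical use (illustrative) in exception-entry debug logging:
+ *
+ *     qemu_log_mask(CPU_LOG_INT, "...mode %s\n",
+ *                   aarch32_mode_name(env->uncached_cpsr));
+ */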
+
+/**
+ * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
+ *
+ * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
+ * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
+ * Must be called with the iothread lock held.
+ */
+void arm_cpu_update_virq(ARMCPU *cpu);
+
+/**
+ * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
+ *
+ * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
+ * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
+ * Must be called with the iothread lock held.
+ */
+void arm_cpu_update_vfiq(ARMCPU *cpu);
+
+/**
+ * arm_mmu_idx_el:
+ * @env: The cpu environment
+ * @el: The EL to use.
+ *
+ * Return the full ARMMMUIdx for the translation regime for EL.
+ */
+ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
+
+/**
+ * arm_mmu_idx:
+ * @env: The cpu environment
+ *
+ * Return the full ARMMMUIdx for the current translation regime.
+ */
+ARMMMUIdx arm_mmu_idx(CPUARMState *env);
+
+/**
+ * arm_stage1_mmu_idx:
+ * @env: The cpu environment
+ *
+ * Return the ARMMMUIdx for the stage1 traversal for the current regime.
+ */
+#ifdef CONFIG_USER_ONLY
+static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
+{
+ return ARMMMUIdx_Stage1_E0;
+}
+#else
+ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
+#endif
+
+/**
+ * arm_mmu_idx_is_stage1_of_2:
+ * @mmu_idx: The ARMMMUIdx to test
+ *
+ * Return true if @mmu_idx is a NOTLB mmu_idx that is the
+ * first stage of a two stage regime.
+ */
+static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
+ const ARMISARegisters *id)
+{
+ uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;
+
+ if ((features >> ARM_FEATURE_V4T) & 1) {
+ valid |= CPSR_T;
+ }
+ if ((features >> ARM_FEATURE_V5) & 1) {
+        valid |= CPSR_Q; /* V5TE in reality */
+ }
+ if ((features >> ARM_FEATURE_V6) & 1) {
+ valid |= CPSR_E | CPSR_GE;
+ }
+ if ((features >> ARM_FEATURE_THUMB2) & 1) {
+ valid |= CPSR_IT;
+ }
+ if (isar_feature_aa32_jazelle(id)) {
+ valid |= CPSR_J;
+ }
+ if (isar_feature_aa32_pan(id)) {
+ valid |= CPSR_PAN;
+ }
+
+ return valid;
+}
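+
+/* Illustrative use: mask a value being written to the CPSR down to the
+ * bits this CPU actually implements, e.g.
+ *
+ *     val &= aarch32_cpsr_valid_mask(env->features,
+ *                                    &env_archcpu(env)->isar);
+ */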
+
+static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
+{
+ uint32_t valid;
+
+ valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
+ if (isar_feature_aa64_bti(id)) {
+ valid |= PSTATE_BTYPE;
+ }
+ if (isar_feature_aa64_pan(id)) {
+ valid |= PSTATE_PAN;
+ }
+ if (isar_feature_aa64_uao(id)) {
+ valid |= PSTATE_UAO;
+ }
+ if (isar_feature_aa64_mte(id)) {
+ valid |= PSTATE_TCO;
+ }
+
+ return valid;
+}
+
+/*
+ * Parameters of a given virtual address, as extracted from the
+ * translation control register (TCR) for a given regime.
+ */
+typedef struct ARMVAParameters {
+ unsigned tsz : 8;
+ unsigned select : 1;
+ bool tbi : 1;
+ bool epd : 1;
+ bool hpd : 1;
+ bool using16k : 1;
+ bool using64k : 1;
+} ARMVAParameters;
+
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
+ ARMMMUIdx mmu_idx, bool data);
+
+static inline int exception_target_el(CPUARMState *env)
+{
+ int target_el = MAX(1, arm_current_el(env));
+
+ /*
+ * No such thing as secure EL1 if EL3 is aarch32,
+ * so update the target EL to EL3 in this case.
+ */
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
+ target_el = 3;
+ }
+
+ return target_el;
+}
+
+/* Determine if allocation tags are available. */
+static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
+ uint64_t sctlr)
+{
+ if (el < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_ATA)) {
+ return false;
+ }
+ if (el < 2
+ && arm_feature(env, ARM_FEATURE_EL2)
+ && !(arm_hcr_el2_eff(env) & HCR_ATA)) {
+ return false;
+ }
+ sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
+ return sctlr != 0;
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
+
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+
+void arm_log_exception(int idx);
+
+#endif /* !CONFIG_USER_ONLY */
+
+/*
+ * The log2 of the words in the tag block, for GMID_EL1.BS.
+ * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
+ */
+#define GMID_EL1_BS 6
+
+/* We associate one allocation tag per 16 bytes, the minimum. */
+#define LOG2_TAG_GRANULE 4
+#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
+
+static inline int allocation_tag_from_addr(uint64_t ptr)
+{
+ return extract64(ptr, 56, 4);
+}
+
+static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
+{
+ return deposit64(ptr, 56, 4, rtag);
+}
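+
+/* Worked example (illustrative): with ptr = 0x0000ffff12345678,
+ * address_with_allocation_tag(ptr, 0x3) == 0x0300ffff12345678, and
+ * allocation_tag_from_addr() recovers 0x3 from that result.
+ */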
+
#endif