void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
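+/*
+ * Restore TCG CPU state from the data recorded at translation time;
+ * this is Arm's implementation of the TCGCPUOps restore_state_to_opc hook.
+ */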
+void arm_restore_state_to_opc(CPUState *cs,
+ const TranslationBlock *tb,
+ const uint64_t *data);
+
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
+ arm_feature(env, ARM_FEATURE_V8)) {
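+ /* v8R behaves as if TTBCR.EAE were RES1: long-descriptor format is always in use */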
+ return true;
+ }
return arm_el_is_aa64(env, 1) ||
(arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
ARMFault_AsyncExternal,
ARMFault_Debug,
ARMFault_TLBConflict,
+ ARMFault_UnsuppAtomicUpdate,
ARMFault_Lockdown,
ARMFault_Exclusive,
ARMFault_ICacheMaint,
case ARMFault_TLBConflict:
fsc = 0x30;
break;
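+ /* FEAT_HAFDBS: "unsupported atomic hardware update" fault */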
+ case ARMFault_UnsuppAtomicUpdate:
+ fsc = 0x31;
+ break;
case ARMFault_Lockdown:
fsc = 0x34;
break;
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
-/*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
- */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
- bool secstate, bool priv, bool negpri);
-
-/*
- * Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
- bool secstate, bool priv);
-
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
}
}
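+/* Return true if this address translation regime is stage 2 (Secure or Non-secure) */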
+static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
+{
+ return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
+}
+
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
}
}
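+/* Return true if this translation regime is a user (unprivileged) regime */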
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MSUserNegPri:
+ return true;
+ default:
+ return false;
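+ /*
+ * The combined EL1&0 stage 1+2 indexes are not expected here; callers
+ * should have converted them to their Stage1_* equivalents first.
+ */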
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ g_assert_not_reached();
+ }
+}
+
/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
bool hpd : 1;
bool tsz_oob : 1; /* tsz has been clamped to legal range */
bool ds : 1;
+ bool ha : 1; /* value of TCR_ELx.HA */
+ bool hd : 1; /* value of TCR_ELx.HD */
ARMGranuleSize gran : 2;
} ARMVAParameters;
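
The new ha/hd bits mirror TCR_ELx.HA (bit 39) and TCR_ELx.HD (bit 40), the
FEAT_HAFDBS enables for hardware management of the Access flag and of the
dirty state. A minimal standalone sketch of that decoding, outside QEMU and
with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCR_HA (1ULL << 39) /* hardware Access flag update enable */
#define TCR_HD (1ULL << 40) /* hardware dirty state management enable */

/* Hypothetical helper: decode the HA/HD enables from a TCR_ELx value. */
static void tcr_ha_hd(uint64_t tcr, bool *ha, bool *hd)
{
    *ha = !!(tcr & TCR_HA);
    /* HD has no effect unless HA is also enabled. */
    *hd = *ha && (tcr & TCR_HD);
}

int main(void)
{
    bool ha, hd;
    tcr_ha_hd(TCR_HA | TCR_HD, &ha, &hd);
    printf("ha=%d hd=%d\n", ha, hd); /* ha=1 hd=1 */
    return 0;
}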
unsigned int attrs:8;
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
bool is_s2_format:1;
+ bool guarded:1; /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;
/* Fields that are valid upon success. */
*/
#define GMID_EL1_BS 6
-/* We associate one allocation tag per 16 bytes, the minimum. */
-#define LOG2_TAG_GRANULE 4
-#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
-
/*
* SVE predicates are 1/8 the size of SVE vectors, and cannot use
* the same simd_desc() encoding due to restrictions on size.
((1 << (1 - 1)) | (1 << (2 - 1)) | \
(1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
+/*
+ * Return true if it is possible to take a fine-grained-trap to EL2.
+ */
+static inline bool arm_fgt_active(CPUARMState *env, int el)
+{
+ /*
+ * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
+ * that can affect EL0, but it is harmless to do the test also for
+ * traps on registers that are only accessible at EL1 because if the test
+ * returns true then we can't be executing at EL1 anyway.
+ * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
+ * traps from AArch32 can only happen when it is EL0 that is AArch32.
+ */
+ return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
+ el < 2 && arm_is_el2_enabled(env) &&
+ arm_el_is_aa64(env, 1) &&
+ (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
+ (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
+}
+
#endif
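
As a standalone illustration of the "{E2H,TGE} != {1,1}" clause above (not
QEMU code: the helper name is made up, and the real gate also checks
FEAT_FGT, that EL2 is enabled, that EL1 is AArch64, and SCR_EL3.FGTEN, as in
arm_fgt_active()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCR_TGE (1ULL << 27)
#define HCR_E2H (1ULL << 34)

/*
 * Hypothetical helper: the {E2H,TGE} part of the gate in isolation. When
 * HCR_EL2.{E2H,TGE} == {1,1}, EL0 runs under the EL2 translation regime,
 * so the EL1/EL0 fine-grained traps cannot fire.
 */
static bool fgt_possible_for_e2h_tge(uint64_t hcr)
{
    return (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE);
}

int main(void)
{
    printf("%d\n", fgt_possible_for_e2h_tge(HCR_E2H | HCR_TGE)); /* 0 */
    printf("%d\n", fgt_possible_for_e2h_tge(HCR_TGE));           /* 1 */
    return 0;
}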