#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
+#include "cpu-features.h"
#include "idau.h"
#ifdef CONFIG_TCG
# include "tcg/oversized-guest.h"
#endif
typedef struct S1Translate {
+ /*
+ * in_mmu_idx: specifies which TTBR, TCR, etc to use for the walk.
+ * Together with in_space, specifies the architectural translation regime.
+ */
ARMMMUIdx in_mmu_idx;
+ /*
+ * in_ptw_idx: specifies which mmuidx to use for the actual
+ * page table descriptor load operations. This will be one of the
+ * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+ * this field is updated accordingly.
+ */
ARMMMUIdx in_ptw_idx;
+ /*
+ * in_space: the security space for this walk. This plus
+ * the in_mmu_idx specify the architectural translation regime.
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+ * this field is updated accordingly.
+ *
+ * Note that the security space for the in_ptw_idx may be different
+ * from that for the in_mmu_idx. We do not need to explicitly track
+ * the in_ptw_idx security space because:
+ * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+ * itself specifies the security space
+ * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+ * space used for ptw reads is the same as that of the security
+ * space of the stage 1 translation for all cases except where
+ * stage 1 is Secure; in that case the only possibilities for
+ * the ptw read are Secure and NonSecure, and the in_ptw_idx
+ * value being Stage2 vs Stage2_S distinguishes those.
+ */
ARMSecuritySpace in_space;
- bool in_secure;
+ /*
+ * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+ * accesses will not update the guest page table access flags
+ * and will not change the state of the softmmu TLBs.
+ */
bool in_debug;
- bool out_secure;
+ /*
+ * If this is stage 2 of a stage 1+2 page table walk, then this must
+ * be true if stage 1 is an EL0 access; otherwise this is ignored.
+ * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
+ */
+ bool in_s1_is_el0;
bool out_rw;
bool out_be;
ARMSecuritySpace out_space;
void *out_host;
} S1Translate;
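+
+/*
+ * For example, a stage 1 walk for a Secure EL1&0 regime starts with
+ * in_mmu_idx == ARMMMUIdx_Stage1_E1, in_space == ARMSS_Secure and
+ * in_ptw_idx == ARMMMUIdx_Stage2_S, so that the stage 1 descriptor
+ * loads are themselves translated by the Secure stage 2.
+ */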
-static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
- uint64_t address,
- MMUAccessType access_type, bool s1_is_el0,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi);
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
+ target_ulong address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi);
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
- GetPhysAddrResult *result,
- ARMMMUFaultInfo *fi);
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
+ target_ulong address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi);
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
/*
* We're OK to check the current state of the CPU here because
- * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
+ * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
+ * changes.
* (2) there's no way to do a lookup that cares about Stage 2 for a
* different security state to the current one for AArch64, and AArch32
* never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
* an NS stage 1+2 lookup while the NS bit is 0.)
*/
- if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
+ if (!arm_el_is_aa64(env, 3)) {
return ARMMMUIdx_Phys_NS;
}
- if (stage2idx == ARMMMUIdx_Stage2_S) {
- s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
- } else {
- s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
- }
- return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+ switch (arm_security_space_below_el3(env)) {
+ case ARMSS_NonSecure:
+ return ARMMMUIdx_Phys_NS;
+ case ARMSS_Realm:
+ return ARMMMUIdx_Phys_Realm;
+ case ARMSS_Secure:
+ if (stage2idx == ARMMMUIdx_Stage2_S) {
+ s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
+ } else {
+ s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
+ }
+ return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+ default:
+ g_assert_not_reached();
+ }
}
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
- bool is_secure)
+ ARMSecuritySpace space)
{
uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_M)) {
+ bool is_secure = arm_space_is_secure(space);
switch (env->v7m.mpu_ctrl[is_secure] &
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
case R_V7M_MPU_CTRL_ENABLE_MASK:
}
}
- hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
switch (mmu_idx) {
case ARMMMUIdx_Stage2:
case ARMMMUIdx_Stage2_S:
/* HCR.DC means HCR.VM behaves as 1 */
+ hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
/* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
+ hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_TGE) {
return true;
}
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
+ hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_DC) {
return true;
}
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
+static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
+ ARMSecuritySpace pspace,
+ ARMMMUFaultInfo *fi)
+{
+ MemTxAttrs attrs = {
+ .secure = true,
+ .space = ARMSS_Root,
+ };
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t gpccr = env->cp15.gpccr_el3;
+ unsigned pps, pgs, l0gptsz, level = 0;
+ uint64_t tableaddr, pps_mask, align, entry, index;
+ AddressSpace *as;
+ MemTxResult result;
+ int gpi;
+
+ if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
+ return true;
+ }
+
+ /*
+ * GPC Priority 1 (R_GMGRR):
+ * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
+ * the access fails as GPT walk fault at level 0.
+ */
+
+ /*
+ * Configuration of PPS to a value exceeding the implemented
+ * physical address size is invalid.
+ */
+ pps = FIELD_EX64(gpccr, GPCCR, PPS);
+ if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+ goto fault_walk;
+ }
+ pps = pamax_map[pps];
+ pps_mask = MAKE_64BIT_MASK(0, pps);
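+ /* E.g. PPS == 0b101 selects 48 bits, so pps_mask covers bits [47:0]. */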
+
+ switch (FIELD_EX64(gpccr, GPCCR, SH)) {
+ case 0b10: /* outer shareable */
+ break;
+ case 0b00: /* non-shareable */
+ case 0b11: /* inner shareable */
+ /* Inner and Outer non-cacheable requires Outer shareable. */
+ if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
+ FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
+ goto fault_walk;
+ }
+ break;
+ default: /* reserved */
+ goto fault_walk;
+ }
+
+ switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
+ case 0b00: /* 4KB */
+ pgs = 12;
+ break;
+ case 0b01: /* 64KB */
+ pgs = 16;
+ break;
+ case 0b10: /* 16KB */
+ pgs = 14;
+ break;
+ default: /* reserved */
+ goto fault_walk;
+ }
+
+ /* Note this field is read-only and fixed at reset. */
+ l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
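+ /* E.g. the minimum field value 0 gives l0gptsz == 30: 1GB per L0 entry. */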
+
+ /*
+ * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
+ * R_CPDSB: A NonSecure physical address input exceeding PPS
+ * does not experience any fault.
+ */
+ if (paddress & ~pps_mask) {
+ if (pspace == ARMSS_NonSecure) {
+ return true;
+ }
+ goto fault_size;
+ }
+
+ /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
+ tableaddr = env->cp15.gptbr_el3 << 12;
+ if (tableaddr & ~pps_mask) {
+ goto fault_size;
+ }
+
+ /*
+ * BADDR is aligned per a function of PPS and L0GPTSZ.
+ * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
+ * unlike the RES0 bits of the GPT entries (R_XNKFZ).
+ */
+ align = MAX(pps - l0gptsz + 3, 12);
+ align = MAKE_64BIT_MASK(0, align);
+ tableaddr &= ~align;
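+ /*
+ * Worked example: PPS == 40 bits and L0GPTSZ == 30 bits give a level 0
+ * table of 2^10 8-byte entries, so BADDR must be 8KB aligned
+ * (align == MAX(40 - 30 + 3, 12) == 13 bits).
+ */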
+
+ as = arm_addressspace(env_cpu(env), attrs);
+
+ /* Level 0 lookup. */
+ index = extract64(paddress, l0gptsz, pps - l0gptsz);
+ tableaddr += index * 8;
+ entry = address_space_ldq_le(as, tableaddr, attrs, &result);
+ if (result != MEMTX_OK) {
+ goto fault_eabt;
+ }
+
+ switch (extract32(entry, 0, 4)) {
+ case 1: /* block descriptor */
+ if (entry >> 8) {
+ goto fault_walk; /* RES0 bits not 0 */
+ }
+ gpi = extract32(entry, 4, 4);
+ goto found;
+ case 3: /* table descriptor */
+ tableaddr = entry & ~0xf;
+ align = MAX(l0gptsz - pgs - 1, 12);
+ align = MAKE_64BIT_MASK(0, align);
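+ /*
+ * Illustrative numbers: l0gptsz == 30 and pgs == 12 give 2^18
+ * granules per L0 entry and 16 GPIs per 64-bit entry, so a level 1
+ * table of 2^17 bytes, aligned to its own size (minimum 4KB).
+ */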
+ if (tableaddr & (~pps_mask | align)) {
+ goto fault_walk; /* RES0 bits not 0 */
+ }
+ break;
+ default: /* invalid */
+ goto fault_walk;
+ }
+
+ /* Level 1 lookup */
+ level = 1;
+ index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
+ tableaddr += index * 8;
+ entry = address_space_ldq_le(as, tableaddr, attrs, &result);
+ if (result != MEMTX_OK) {
+ goto fault_eabt;
+ }
+
+ switch (extract32(entry, 0, 4)) {
+ case 1: /* contiguous descriptor */
+ if (entry >> 10) {
+ goto fault_walk; /* RES0 bits not 0 */
+ }
+ /*
+ * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
+ * and because we cannot invalidate by pa, and thus will always
+ * flush entire tlbs, we don't actually care about the range here
+ * and can simply extract the GPI as the result.
+ */
+ if (extract32(entry, 8, 2) == 0) {
+ goto fault_walk; /* reserved contig */
+ }
+ gpi = extract32(entry, 4, 4);
+ break;
+ default:
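+ /*
+ * GPI table entry: 16 4-bit GPIs packed into 64 bits, with
+ * address bits [pgs+3:pgs] selecting the GPI for this granule.
+ */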
+ index = extract64(paddress, pgs, 4);
+ gpi = extract64(entry, index * 4, 4);
+ break;
+ }
+
+ found:
+ switch (gpi) {
+ case 0b0000: /* no access */
+ break;
+ case 0b1111: /* all access */
+ return true;
+ case 0b1000:
+ case 0b1001:
+ case 0b1010:
+ case 0b1011:
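+ /*
+ * Single-space GPIs: the low two bits of the 0b10xx encodings
+ * happen to match the ARMSecuritySpace enum values (Secure,
+ * NonSecure, Root, Realm), which is what makes the gpi & 3
+ * comparison below work.
+ */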
+ if (pspace == (gpi & 3)) {
+ return true;
+ }
+ break;
+ default:
+ goto fault_walk; /* reserved */
+ }
+
+ fi->gpcf = GPCF_Fail;
+ goto fault_common;
+ fault_eabt:
+ fi->gpcf = GPCF_EABT;
+ goto fault_common;
+ fault_size:
+ fi->gpcf = GPCF_AddressSize;
+ goto fault_common;
+ fault_walk:
+ fi->gpcf = GPCF_Walk;
+ fault_common:
+ fi->level = level;
+ fi->paddr = paddress;
+ fi->paddr_space = pspace;
+ return false;
+}
+
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
/*
}
}
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+ ARMMMUIdx s2_mmu_idx)
+{
+ /*
+ * Return the security space to use for stage 2 when doing
+ * the S1 page table descriptor load.
+ */
+ if (regime_is_stage2(s2_mmu_idx)) {
+ /*
+ * The security space for ptw reads is almost always the same
+ * as that of the security space of the stage 1 translation.
+ * The only exception is when stage 1 is Secure; in that case
+ * the ptw read might be to the Secure or the NonSecure space
+ * (but never Realm or Root), and the s2_mmu_idx tells us which.
+ * Root translations are always single-stage.
+ */
+ if (s1_space == ARMSS_Secure) {
+ return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+ } else {
+ assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+ assert(s1_space != ARMSS_Root);
+ return s1_space;
+ }
+ } else {
+ /* ptw loads are from phys: the mmu idx itself says which space */
+ return arm_phys_to_space(s2_mmu_idx);
+ }
+}
+
+static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
+{
+ /*
+ * For stage 2 faults in Secure EL2, S1NS indicates
+ * whether the faulting IPA is in the Secure or NonSecure
+ * IPA space. For all other kinds of fault, it is false.
+ */
+ return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
+ && s2_mmu_idx == ARMMMUIdx_Stage2;
+}
+
/* Translate a S1 pagetable walk through S2 if needed. */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
hwaddr addr, ARMMMUFaultInfo *fi)
{
- ARMSecuritySpace space = ptw->in_space;
- bool is_secure = ptw->in_secure;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
uint8_t pte_attrs;
* From gdbstub, do not use softmmu so that we don't modify the
* state of the cpu at all, including softmmu tlb contents.
*/
- if (regime_is_stage2(s2_mmu_idx)) {
- S1Translate s2ptw = {
- .in_mmu_idx = s2_mmu_idx,
- .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
- .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
- .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
- : space == ARMSS_Realm ? ARMSS_Realm
- : ARMSS_NonSecure),
- .in_debug = true,
- };
- GetPhysAddrResult s2 = { };
-
- if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
- false, &s2, fi)) {
- goto fail;
- }
- ptw->out_phys = s2.f.phys_addr;
- pte_attrs = s2.cacheattrs.attrs;
- ptw->out_secure = s2.f.attrs.secure;
- ptw->out_space = s2.f.attrs.space;
- } else {
- /* Regime is physical. */
- ptw->out_phys = addr;
- pte_attrs = 0;
- ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S;
- ptw->out_space = (s2_mmu_idx == ARMMMUIdx_Phys_S ? ARMSS_Secure
- : space == ARMSS_Realm ? ARMSS_Realm
- : ARMSS_NonSecure);
+ ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
+ S1Translate s2ptw = {
+ .in_mmu_idx = s2_mmu_idx,
+ .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
+ .in_space = s2_space,
+ .in_debug = true,
+ };
+ GetPhysAddrResult s2 = { };
+
+ if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+ goto fail;
}
+
+ ptw->out_phys = s2.f.phys_addr;
+ pte_attrs = s2.cacheattrs.attrs;
ptw->out_host = NULL;
ptw->out_rw = false;
+ ptw->out_space = s2.f.attrs.space;
} else {
#ifdef CONFIG_TCG
CPUTLBEntryFull *full;
int flags;
env->tlb_fi = fi;
- flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
- arm_to_core_mmu_idx(s2_mmu_idx),
- true, &ptw->out_host, &full, 0);
+ flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
+ arm_to_core_mmu_idx(s2_mmu_idx),
+ &ptw->out_host, &full);
env->tlb_fi = NULL;
if (unlikely(flags & TLB_INVALID_MASK)) {
}
ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
ptw->out_rw = full->prot & PAGE_WRITE;
- pte_attrs = full->pte_attrs;
- ptw->out_secure = full->attrs.secure;
+ pte_attrs = full->extra.arm.pte_attrs;
ptw->out_space = full->attrs.space;
#else
g_assert_not_reached();
}
if (regime_is_stage2(s2_mmu_idx)) {
- uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+ uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
/*
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
- fi->s1ns = !is_secure;
+ fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
return false;
}
}
fail:
assert(fi->type != ARMFault_None);
+ if (fi->type == ARMFault_GPCFOnOutput) {
+ fi->type = ARMFault_GPCFOnWalk;
+ }
fi->s2addr = addr;
- fi->stage2 = true;
- fi->s1ptw = true;
- fi->s1ns = !is_secure;
+ fi->stage2 = regime_is_stage2(s2_mmu_idx);
+ fi->s1ptw = fi->stage2;
+ fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
return false;
}
} else {
/* Page tables are in MMIO. */
MemTxAttrs attrs = {
- .secure = ptw->out_secure,
.space = ptw->out_space,
+ .secure = arm_space_is_secure(ptw->out_space),
};
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
} else {
/* Page tables are in MMIO. */
MemTxAttrs attrs = {
- .secure = ptw->out_secure,
.space = ptw->out_space,
+ .secure = arm_space_is_secure(ptw->out_space),
};
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
-#ifdef TARGET_AARCH64
+#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
uint64_t cur_val;
void *host = ptw->out_host;
if (unlikely(!host)) {
fi->type = ARMFault_UnsuppAtomicUpdate;
- fi->s1ptw = true;
return 0;
}
*/
if (unlikely(!ptw->out_rw)) {
int flags;
- void *discard;
env->tlb_fi = fi;
- flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
- arm_to_core_mmu_idx(ptw->in_ptw_idx),
- true, &discard, 0);
+ flags = probe_access_full_mmu(env, ptw->out_virt, 0,
+ MMU_DATA_STORE,
+ arm_to_core_mmu_idx(ptw->in_ptw_idx),
+ NULL, NULL);
env->tlb_fi = NULL;
if (unlikely(flags & TLB_INVALID_MASK)) {
+ /*
+ * We know this must be a stage 2 fault because the granule
+ * protection table does not separately track read and write
+ * permission, so all GPC faults are caught in S1_ptw_translate():
+ * we only get here for "readable but not writeable".
+ */
assert(fi->type != ARMFault_None);
fi->s2addr = ptw->out_virt;
fi->stage2 = true;
fi->s1ptw = true;
- fi->s1ns = !ptw->in_secure;
+ fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
return 0;
}
return cur_val;
#else
- /* AArch32 does not have FEAT_HADFS. */
+ /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
g_assert_not_reached();
#endif
}
* @xn: XN (execute-never) bits
* @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
*/
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+static int get_S2prot_noexecute(int s2ap)
{
int prot = 0;
if (s2ap & 2) {
prot |= PAGE_WRITE;
}
+ return prot;
+}
+
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+{
+ int prot = get_S2prot_noexecute(s2ap);
if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
switch (xn) {
}
}
- if (out_pa == ARMSS_NonSecure && in_pa == ARMSS_Secure &&
- (env->cp15.scr_el3 & SCR_SIF)) {
- return prot_rw;
+ if (in_pa != out_pa) {
+ switch (in_pa) {
+ case ARMSS_Root:
+ /*
+ * R_ZWRVD: permission fault for insn fetched from non-Root,
+ * I_WWBFB: SIF has no effect in EL3.
+ */
+ return prot_rw;
+ case ARMSS_Realm:
+ /*
+ * R_PKTDS: permission fault for insn fetched from non-Realm,
+ * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
+ * happens during any stage2 translation.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return prot_rw;
+ default:
+ break;
+ }
+ break;
+ case ARMSS_Secure:
+ if (env->cp15.scr_el3 & SCR_SIF) {
+ return prot_rw;
+ }
+ break;
+ default:
+ /* Input NonSecure must have output NonSecure. */
+ g_assert_not_reached();
+ }
}
/* TODO have_wxn should be replaced with
return INT_MIN;
}
+static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
+ ARMGranuleSize gran, int level)
+{
+ /*
+ * See pseudocode AArch64.BlockDescSupported(): block descriptors
+ * are not valid at all levels, depending on the page size.
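+ * For example, with 4KB granules a level 0 block would map 512GB,
+ * which is only permitted with the 52-bit (FEAT_LPA2) descriptor
+ * format, i.e. when ds is true.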
+ */
+ switch (gran) {
+ case Gran4K:
+ return (level == 0 && ds) || level == 1 || level == 2;
+ case Gran16K:
+ return (level == 1 && ds) || level == 2;
+ case Gran64K:
+ return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
+ default:
+ g_assert_not_reached();
+ }
+}
+
/**
* get_phys_addr_lpae: perform one stage of page table walk, LPAE format
*
* @ptw: Current and next stage parameters for the walk.
* @address: virtual address to get physical address for
* @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
- * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
- * (so this is a stage 2 page table walk),
- * must be true if this is stage 2 of a stage 1+2
- * walk for an EL0 access. If @mmu_idx is anything else,
- * @s1_is_el0 is ignored.
* @result: set on translation success,
* @fi: set to fault info if the translation fails
*/
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t address,
- MMUAccessType access_type, bool s1_is_el0,
+ MMUAccessType access_type,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
ptw->in_ptw_idx += 1;
- ptw->in_secure = false;
ptw->in_space = ARMSS_NonSecure;
}
new_descriptor = descriptor;
restart_atomic_update:
- if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
- /* Invalid, or the Reserved level 3 encoding */
+ if (!(descriptor & 1) ||
+ (!(descriptor & 2) &&
+ !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
+ /* Invalid, or a block descriptor at an invalid level */
goto do_translation_fault;
}
* Extract attributes from the (modified) descriptor, and apply
* table descriptors. Stage 2 table descriptors do not include
* any attribute fields. HPD disables all the table attributes
- * except NSTable.
+ * except NSTable (which we have already handled).
*/
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
if (!regime_is_stage2(mmu_idx)) {
- attrs |= !ptw->in_secure << 5; /* NS */
if (!param.hpd) {
attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
/*
/*
* R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
* The bit remains ignored for other security states.
+ * R_YMCSL: Executing an insn fetched from non-Realm causes
+ * a stage2 permission fault.
*/
if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
out_space = ARMSS_NonSecure;
+ result->f.prot = get_S2prot_noexecute(ap);
+ } else {
+ xn = extract64(attrs, 53, 2);
+ result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
}
- xn = extract64(attrs, 53, 2);
- result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
} else {
int nse, ns = extract32(attrs, 5, 1);
switch (out_space) {
/* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.guarded = extract64(attrs, 50, 1); /* GP */
+ result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
}
}
do_translation_fault:
fi->type = ARMFault_Translation;
do_fault:
- fi->level = level;
- /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
- fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
- fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
+ if (fi->s1ptw) {
+ /* Retain the existing stage 2 fi->level */
+ assert(fi->stage2);
+ } else {
+ fi->level = level;
+ fi->stage2 = regime_is_stage2(mmu_idx);
+ }
+ fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
return true;
}
-static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool is_secure, GetPhysAddrResult *result,
+static bool get_phys_addr_pmsav5(CPUARMState *env,
+ S1Translate *ptw,
+ uint32_t address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
int n;
uint32_t mask;
uint32_t base;
+ ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
bool is_user = regime_is_user(env, mmu_idx);
- if (regime_translation_disabled(env, mmu_idx, is_secure)) {
+ if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
/* MPU disabled. */
result->f.phys_addr = address;
result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
-static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool secure, GetPhysAddrResult *result,
+static bool get_phys_addr_pmsav7(CPUARMState *env,
+ S1Translate *ptw,
+ uint32_t address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
int n;
+ ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
bool is_user = regime_is_user(env, mmu_idx);
+ bool secure = arm_space_is_secure(ptw->in_space);
result->f.phys_addr = address;
result->f.lg_page_size = TARGET_PAGE_BITS;
result->f.prot = 0;
- if (regime_translation_disabled(env, mmu_idx, secure) ||
+ if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
m_is_ppb_region(env, address)) {
/*
* MPU disabled or M profile PPB access: use default memory map.
* are done in arm_v7m_load_vector(), which always does a direct
* read using address_space_ldl(), rather than going via this function.
*/
- if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
+ if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
+ /* MPU disabled */
hit = true;
} else if (m_is_ppb_region(env, address)) {
hit = true;
}
}
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool secure, GetPhysAddrResult *result,
+static bool get_phys_addr_pmsav8(CPUARMState *env,
+ S1Translate *ptw,
+ uint32_t address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
V8M_SAttributes sattrs = {};
+ ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
+ bool secure = arm_space_is_secure(ptw->in_space);
bool ret;
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
assert(!s1.is_s2_format);
ret.is_s2_format = false;
- ret.guarded = s1.guarded;
if (s1.attrs == 0xf0) {
tagged = true;
* MMU disabled. S1 addresses within aa64 translation regimes are
* still checked for bounds -- see AArch64.S1DisabledOutput().
*/
-static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
+static bool get_phys_addr_disabled(CPUARMState *env,
+ S1Translate *ptw,
+ target_ulong address,
MMUAccessType access_type,
- ARMMMUIdx mmu_idx, bool is_secure,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
+ ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
uint8_t memattr = 0x00; /* Device nGnRnE */
- uint8_t shareability = 0; /* non-sharable */
+ uint8_t shareability = 0; /* non-shareable */
int r_el;
switch (mmu_idx) {
/* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
if (r_el == 1) {
- uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+ uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
if (hcr & HCR_DC) {
if (hcr & HCR_DCT) {
memattr = 0xf0; /* Tagged, Normal, WB, RWA */
}
}
}
- if (memattr == 0 && access_type == MMU_INST_FETCH) {
- if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
- memattr = 0xee; /* Normal, WT, RA, NT */
- } else {
- memattr = 0x44; /* Normal, NC, No */
+ if (memattr == 0) {
+ if (access_type == MMU_INST_FETCH) {
+ if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+ memattr = 0xee; /* Normal, WT, RA, NT */
+ } else {
+ memattr = 0x44; /* Normal, NC, No */
+ }
}
- shareability = 2; /* outer sharable */
+ shareability = 2; /* outer shareable */
}
result->cacheattrs.is_s2_format = false;
break;
{
hwaddr ipa;
int s1_prot, s1_lgpgsz;
- bool is_secure = ptw->in_secure;
- bool ret, ipa_secure;
+ ARMSecuritySpace in_space = ptw->in_space;
+ bool ret, ipa_secure, s1_guarded;
ARMCacheAttrs cacheattrs1;
ARMSecuritySpace ipa_space;
- bool is_el0;
uint64_t hcr;
- ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
/* If S1 fails, return early. */
if (ret) {
ipa_secure = result->f.attrs.secure;
ipa_space = result->f.attrs.space;
- is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
+ ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
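+ /*
+ * A Secure stage 1 can yield an IPA in either IPA space; the NS bit
+ * from the stage 1 descriptors (reported here as ipa_secure) selects
+ * which stage 2 regime translates it.
+ */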
ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
- ptw->in_secure = ipa_secure;
ptw->in_space = ipa_space;
ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
*/
s1_prot = result->f.prot;
s1_lgpgsz = result->f.lg_page_size;
+ s1_guarded = result->f.extra.arm.guarded;
cacheattrs1 = result->cacheattrs;
memset(result, 0, sizeof(*result));
- if (arm_feature(env, ARM_FEATURE_PMSA)) {
- ret = get_phys_addr_pmsav8(env, ipa, access_type,
- ptw->in_mmu_idx, is_secure, result, fi);
- } else {
- ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
- is_el0, result, fi);
- }
+ ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
}
/* Combine the S1 and S2 cache attributes. */
- hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+ hcr = arm_hcr_el2_eff_secstate(env, in_space);
if (hcr & HCR_DC) {
/*
* HCR.DC forces the first stage attributes to
result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
result->cacheattrs);
+ /* No BTI GP information in stage 2, we just use the S1 value */
+ result->f.extra.arm.guarded = s1_guarded;
+
/*
* Check if IPA translates to secure or non-secure PA space.
* Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
*/
- result->f.attrs.secure =
- (is_secure
- && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
- && (ipa_secure
- || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+ if (in_space == ARMSS_Secure) {
+ result->f.attrs.secure =
+ !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+ && (ipa_secure
+ || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+ result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+ }
return false;
}
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
target_ulong address,
MMUAccessType access_type,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
- bool is_secure = ptw->in_secure;
ARMMMUIdx s1_mmu_idx;
/*
* cannot upgrade a NonSecure translation regime's attributes
* to Secure or Realm.
*/
- result->f.attrs.secure = is_secure;
result->f.attrs.space = ptw->in_space;
+ result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
switch (mmu_idx) {
case ARMMMUIdx_Phys_S:
case ARMMMUIdx_Phys_Root:
case ARMMMUIdx_Phys_Realm:
/* Checking Phys early avoids special casing later vs regime_el. */
- return get_phys_addr_disabled(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+ return get_phys_addr_disabled(env, ptw, address, access_type,
+ result, fi);
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
- /* First stage lookup uses second stage for ptw. */
- ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+ /*
+ * First stage lookup uses second stage for ptw; only
+ * Secure has both S and NS IPA and starts with Stage2_S.
+ */
+ ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
+ ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
break;
case ARMMMUIdx_Stage2:
*/
ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
if (arm_feature(env, ARM_FEATURE_EL2) &&
- !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
+ !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
return get_phys_addr_twostage(env, ptw, address, access_type,
result, fi);
}
if (arm_feature(env, ARM_FEATURE_V8)) {
/* PMSAv8 */
- ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+ ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
+ result, fi);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
/* PMSAv7 */
- ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+ ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
+ result, fi);
} else {
/* Pre-v7 MPU */
- ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+ ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
+ result, fi);
}
qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
" mmu_idx %u -> %s (prot %c%c%c)\n",
/* Definitely a real MMU, not an MPU */
- if (regime_translation_disabled(env, mmu_idx, is_secure)) {
- return get_phys_addr_disabled(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+ if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
+ return get_phys_addr_disabled(env, ptw, address, access_type,
+ result, fi);
}
if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, ptw, address, access_type, false,
- result, fi);
+ return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
} else if (arm_feature(env, ARM_FEATURE_V7) ||
regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
}
}
-bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool is_secure, GetPhysAddrResult *result,
- ARMMMUFaultInfo *fi)
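+/*
+ * Like get_phys_addr_nogpc, but additionally check the resulting
+ * physical address against the granule protection table (FEAT_RME),
+ * reporting a GPC failure as a fault on the output address.
+ */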
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
+ target_ulong address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
+{
+ if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+ return true;
+ }
+ if (!granule_protection_check(env, result->f.phys_addr,
+ result->f.attrs.space, fi)) {
+ fi->type = ARMFault_GPCFOnOutput;
+ return true;
+ }
+ return false;
+}
+
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type,
+ ARMMMUIdx mmu_idx, ARMSecuritySpace space,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
{
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
- .in_secure = is_secure,
- .in_space = arm_secure_to_space(is_secure),
+ .in_space = space,
};
- return get_phys_addr_with_struct(env, &ptw, address, access_type,
- result, fi);
+ return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
}
bool get_phys_addr(CPUARMState *env, target_ulong address,
}
ptw.in_space = ss;
- ptw.in_secure = arm_space_is_secure(ss);
- return get_phys_addr_with_struct(env, &ptw, address, access_type,
- result, fi);
+ return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_space = ss,
- .in_secure = arm_space_is_secure(ss),
.in_debug = true,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
bool ret;
- ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+ ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
*attrs = res.f.attrs;
if (ret) {