#include "cpu.h"
#include "internals.h"
#include "idau.h"
-
+#ifdef CONFIG_TCG
+# include "tcg/oversized-guest.h"
+#endif
typedef struct S1Translate {
ARMMMUIdx in_mmu_idx;
ARMMMUIdx in_ptw_idx;
+ ARMSecuritySpace in_space;
bool in_secure;
bool in_debug;
+ /*
+ * If this is stage 2 of a stage 1+2 page table walk, then this must
+ * be true if stage 1 is an EL0 access; otherwise this is ignored.
+ * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
+ */
+ bool in_s1_is_el0;
bool out_secure;
bool out_rw;
bool out_be;
+ ARMSecuritySpace out_space;
hwaddr out_virt;
hwaddr out_phys;
void *out_host;
} S1Translate;
-static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
- uint64_t address,
- MMUAccessType access_type, bool s1_is_el0,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
- __attribute__((nonnull));
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
+ target_ulong address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi);
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
- target_ulong address,
- MMUAccessType access_type,
- GetPhysAddrResult *result,
- ARMMMUFaultInfo *fi)
- __attribute__((nonnull));
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
+ target_ulong address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi);
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
return stage_1_mmu_idx(arm_mmu_idx(env));
}
+/*
+ * Return where we should do ptw loads from for a stage 2 walk.
+ * This depends on whether the address we are looking up is a
+ * Secure IPA or a NonSecure IPA, which we know from whether this is
+ * Stage2 or Stage2_S.
+ * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
+ */
+static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
+{
+ bool s2walk_secure;
+
+ /*
+ * We're OK to check the current state of the CPU here because
+ * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
+ * (2) there's no way to do a lookup that cares about Stage 2 for a
+ * different security state to the current one for AArch64, and AArch32
+ * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
+ * an NS stage 1+2 lookup while the NS bit is 0.)
+ */
+ if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
+ return ARMMMUIdx_Phys_NS;
+ }
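+ /*
+ * VSTCR_EL2.SW and VTCR_EL2.NSW force the stage 2 walks for the
+ * Secure and NonSecure IPA spaces, respectively, to the NonSecure
+ * PA space.
+ */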
+ if (stage2idx == ARMMMUIdx_Stage2_S) {
+ s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
+ } else {
+ s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
+ }
+ return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+}
+
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
case ARMMMUIdx_E3:
break;
- case ARMMMUIdx_Phys_NS:
case ARMMMUIdx_Phys_S:
+ case ARMMMUIdx_Phys_NS:
+ case ARMMMUIdx_Phys_Root:
+ case ARMMMUIdx_Phys_Realm:
/* No translation for physical address spaces. */
return true;
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
+static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
+ ARMSecuritySpace pspace,
+ ARMMMUFaultInfo *fi)
+{
+ MemTxAttrs attrs = {
+ .secure = true,
+ .space = ARMSS_Root,
+ };
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t gpccr = env->cp15.gpccr_el3;
+ unsigned pps, pgs, l0gptsz, level = 0;
+ uint64_t tableaddr, pps_mask, align, entry, index;
+ AddressSpace *as;
+ MemTxResult result;
+ int gpi;
+
+ if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
+ return true;
+ }
+
+ /*
+ * GPC Priority 1 (R_GMGRR):
+ * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
+ * the access fails as a GPT walk fault at level 0.
+ */
+
+ /*
+ * Configuration of PPS to a value exceeding the implemented
+ * physical address size is invalid.
+ */
+ pps = FIELD_EX64(gpccr, GPCCR, PPS);
+ if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+ goto fault_walk;
+ }
+ pps = pamax_map[pps];
+ pps_mask = MAKE_64BIT_MASK(0, pps);
+
+ switch (FIELD_EX64(gpccr, GPCCR, SH)) {
+ case 0b10: /* outer shareable */
+ break;
+ case 0b00: /* non-shareable */
+ case 0b11: /* inner shareable */
+ /* Inner and Outer non-cacheable requires Outer shareable. */
+ if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
+ FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
+ goto fault_walk;
+ }
+ break;
+ default: /* reserved */
+ goto fault_walk;
+ }
+
+ switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
+ case 0b00: /* 4KB */
+ pgs = 12;
+ break;
+ case 0b01: /* 64KB */
+ pgs = 16;
+ break;
+ case 0b10: /* 16KB */
+ pgs = 14;
+ break;
+ default: /* reserved */
+ goto fault_walk;
+ }
+
+ /* Note this field is read-only and fixed at reset. */
+ l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
+
+ /*
+ * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
+ * R_CPDSB: A NonSecure physical address input exceeding PPS
+ * does not experience any fault.
+ */
+ if (paddress & ~pps_mask) {
+ if (pspace == ARMSS_NonSecure) {
+ return true;
+ }
+ goto fault_size;
+ }
+
+ /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
+ tableaddr = env->cp15.gptbr_el3 << 12;
+ if (tableaddr & ~pps_mask) {
+ goto fault_size;
+ }
+
+ /*
+ * BADDR is aligned per a function of PPS and L0GPTSZ.
+ * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
+ * unlike the RES0 bits of the GPT entries (R_XNKFZ).
+ */
+ align = MAX(pps - l0gptsz + 3, 12);
+ align = MAKE_64BIT_MASK(0, align);
+ tableaddr &= ~align;
+
+ as = arm_addressspace(env_cpu(env), attrs);
+
+ /* Level 0 lookup. */
+ index = extract64(paddress, l0gptsz, pps - l0gptsz);
+ tableaddr += index * 8;
+ entry = address_space_ldq_le(as, tableaddr, attrs, &result);
+ if (result != MEMTX_OK) {
+ goto fault_eabt;
+ }
+
+ switch (extract32(entry, 0, 4)) {
+ case 1: /* block descriptor */
+ if (entry >> 8) {
+ goto fault_walk; /* RES0 bits not 0 */
+ }
+ gpi = extract32(entry, 4, 4);
+ goto found;
+ case 3: /* table descriptor */
+ tableaddr = entry & ~0xf;
+ align = MAX(l0gptsz - pgs - 1, 12);
+ align = MAKE_64BIT_MASK(0, align);
+ if (tableaddr & (~pps_mask | align)) {
+ goto fault_walk; /* RES0 bits not 0 */
+ }
+ break;
+ default: /* invalid */
+ goto fault_walk;
+ }
+
+ /* Level 1 lookup */
+ level = 1;
+ index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
+ tableaddr += index * 8;
+ entry = address_space_ldq_le(as, tableaddr, attrs, &result);
+ if (result != MEMTX_OK) {
+ goto fault_eabt;
+ }
+
+ switch (extract32(entry, 0, 4)) {
+ case 1: /* contiguous descriptor */
+ if (entry >> 10) {
+ goto fault_walk; /* RES0 bits not 0 */
+ }
+ /*
+ * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
+ * and because we cannot invalidate by pa, and thus will always
+ * flush entire tlbs, we don't actually care about the range here
+ * and can simply extract the GPI as the result.
+ */
+ if (extract32(entry, 8, 2) == 0) {
+ goto fault_walk; /* reserved contig */
+ }
+ gpi = extract32(entry, 4, 4);
+ break;
+ default:
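+ /* Granules descriptor: sixteen 4-bit GPI fields, one per granule. */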
+ index = extract64(paddress, pgs, 4);
+ gpi = extract64(entry, index * 4, 4);
+ break;
+ }
+
+ found:
+ switch (gpi) {
+ case 0b0000: /* no access */
+ break;
+ case 0b1111: /* all access */
+ return true;
+ case 0b1000:
+ case 0b1001:
+ case 0b1010:
+ case 0b1011:
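+ /* gpi & 3 is the ARMSS_* encoding of the single permitted space. */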
+ if (pspace == (gpi & 3)) {
+ return true;
+ }
+ break;
+ default:
+ goto fault_walk; /* reserved */
+ }
+
+ fi->gpcf = GPCF_Fail;
+ goto fault_common;
+ fault_eabt:
+ fi->gpcf = GPCF_EABT;
+ goto fault_common;
+ fault_size:
+ fi->gpcf = GPCF_AddressSize;
+ goto fault_common;
+ fault_walk:
+ fi->gpcf = GPCF_Walk;
+ fault_common:
+ fi->level = level;
+ fi->paddr = paddress;
+ fi->paddr_space = pspace;
+ return false;
+}
+
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
/*
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
hwaddr addr, ARMMMUFaultInfo *fi)
{
+ ARMSecuritySpace space = ptw->in_space;
bool is_secure = ptw->in_secure;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
uint8_t pte_attrs;
- bool pte_secure;
ptw->out_virt = addr;
* From gdbstub, do not use softmmu so that we don't modify the
* state of the cpu at all, including softmmu tlb contents.
*/
- if (regime_is_stage2(s2_mmu_idx)) {
- S1Translate s2ptw = {
- .in_mmu_idx = s2_mmu_idx,
- .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS,
- .in_secure = is_secure,
- .in_debug = true,
- };
- GetPhysAddrResult s2 = { };
-
- if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
- false, &s2, fi)) {
- goto fail;
- }
- ptw->out_phys = s2.f.phys_addr;
- pte_attrs = s2.cacheattrs.attrs;
- pte_secure = s2.f.attrs.secure;
- } else {
- /* Regime is physical. */
- ptw->out_phys = addr;
- pte_attrs = 0;
- pte_secure = is_secure;
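+ /*
+ * Even when s2_mmu_idx is a Phys_* index this goes through
+ * get_phys_addr_gpc, so the page table walk itself is subject
+ * to the granule protection check.
+ */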
+ S1Translate s2ptw = {
+ .in_mmu_idx = s2_mmu_idx,
+ .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
+ .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
+ .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
+ : space == ARMSS_Realm ? ARMSS_Realm
+ : ARMSS_NonSecure),
+ .in_debug = true,
+ };
+ GetPhysAddrResult s2 = { };
+
+ if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+ goto fail;
}
+
+ ptw->out_phys = s2.f.phys_addr;
+ pte_attrs = s2.cacheattrs.attrs;
ptw->out_host = NULL;
ptw->out_rw = false;
+ ptw->out_secure = s2.f.attrs.secure;
+ ptw->out_space = s2.f.attrs.space;
} else {
+#ifdef CONFIG_TCG
CPUTLBEntryFull *full;
int flags;
env->tlb_fi = fi;
- flags = probe_access_full(env, addr, MMU_DATA_LOAD,
+ flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
arm_to_core_mmu_idx(s2_mmu_idx),
true, &ptw->out_host, &full, 0);
env->tlb_fi = NULL;
if (unlikely(flags & TLB_INVALID_MASK)) {
goto fail;
}
- ptw->out_phys = full->phys_addr;
+ ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
ptw->out_rw = full->prot & PAGE_WRITE;
pte_attrs = full->pte_attrs;
- pte_secure = full->attrs.secure;
+ ptw->out_secure = full->attrs.secure;
+ ptw->out_space = full->attrs.space;
+#else
+ g_assert_not_reached();
+#endif
}
if (regime_is_stage2(s2_mmu_idx)) {
}
}
- /* Check if page table walk is to secure or non-secure PA space. */
- ptw->out_secure = (is_secure
- && !(pte_secure
- ? env->cp15.vstcr_el2 & VSTCR_SW
- : env->cp15.vtcr_el2 & VTCR_NSW));
ptw->out_be = regime_translation_big_endian(env, mmu_idx);
return true;
fail:
assert(fi->type != ARMFault_None);
+ if (fi->type == ARMFault_GPCFOnOutput) {
+ fi->type = ARMFault_GPCFOnWalk;
+ }
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
}
} else {
/* Page tables are in MMIO. */
- MemTxAttrs attrs = { .secure = ptw->out_secure };
+ MemTxAttrs attrs = {
+ .secure = ptw->out_secure,
+ .space = ptw->out_space,
+ };
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
#endif
} else {
/* Page tables are in MMIO. */
- MemTxAttrs attrs = { .secure = ptw->out_secure };
+ MemTxAttrs attrs = {
+ .secure = ptw->out_secure,
+ .space = ptw->out_space,
+ };
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
+#ifdef TARGET_AARCH64
uint64_t cur_val;
void *host = ptw->out_host;
void *discard;
env->tlb_fi = fi;
- flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE,
+ flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
arm_to_core_mmu_idx(ptw->in_ptw_idx),
true, &discard, 0);
env->tlb_fi = NULL;
* we know that TCG_OVERSIZED_GUEST is set, which means that we are
* running in round-robin mode and could only race with dma i/o.
*/
-#ifndef TCG_OVERSIZED_GUEST
+#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
bool locked = qemu_mutex_iothread_locked();
#endif
return cur_val;
+#else
+ /* AArch32 does not have FEAT_HADFS. */
+ g_assert_not_reached();
+#endif
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
* regime, because the attribute will already be non-secure.
*/
result->f.attrs.secure = false;
+ result->f.attrs.space = ARMSS_NonSecure;
}
result->f.phys_addr = phys_addr;
return false;
* @xn: XN (execute-never) bits
* @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
*/
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+static int get_S2prot_noexecute(int s2ap)
{
int prot = 0;
if (s2ap & 2) {
prot |= PAGE_WRITE;
}
+ return prot;
+}
+
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+{
+ int prot = get_S2prot_noexecute(s2ap);
if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
switch (xn) {
* @mmu_idx: MMU index indicating required translation regime
* @is_aa64: TRUE if AArch64
* @ap: The 2-bit simple AP (AP[2:1])
- * @ns: NS (non-secure) bit
* @xn: XN (execute-never) bit
* @pxn: PXN (privileged execute-never) bit
+ * @in_pa: The original input pa space
+ * @out_pa: The output pa space, modified by NSTable, NS, and NSE
*/
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
- int ap, int ns, int xn, int pxn)
+ int ap, int xn, int pxn,
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
+ ARMCPU *cpu = env_archcpu(env);
bool is_user = regime_is_user(env, mmu_idx);
int prot_rw, user_rw;
bool have_wxn;
if (is_user) {
prot_rw = user_rw;
} else {
+ /*
+ * PAN controls can forbid data accesses but don't affect insn fetch.
+ * Plain PAN forbids data accesses if EL0 has data permissions;
+ * PAN3 forbids data accesses if EL0 has either data or exec perms.
+ * Note that for AArch64 the 'user can exec' case is exactly !xn.
+ * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
+ * do not affect EPAN.
+ */
if (user_rw && regime_is_pan(env, mmu_idx)) {
- /* PAN forbids data accesses but doesn't affect insn fetch */
+ prot_rw = 0;
+ } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
+ regime_is_pan(env, mmu_idx) &&
+ (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
prot_rw = 0;
} else {
prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
}
}
- if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
- return prot_rw;
+ if (in_pa != out_pa) {
+ switch (in_pa) {
+ case ARMSS_Root:
+ /*
+ * R_ZWRVD: permission fault for insn fetched from non-Root,
+ * I_WWBFB: SIF has no effect in EL3.
+ */
+ return prot_rw;
+ case ARMSS_Realm:
+ /*
+ * R_PKTDS: permission fault for insn fetched from non-Realm,
+ * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
+ * happens during any stage2 translation.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return prot_rw;
+ default:
+ break;
+ }
+ break;
+ case ARMSS_Secure:
+ if (env->cp15.scr_el3 & SCR_SIF) {
+ return prot_rw;
+ }
+ break;
+ default:
+ /* Input NonSecure must have output NonSecure. */
+ g_assert_not_reached();
+ }
}
/* TODO have_wxn should be replaced with
* check_s2_mmu_setup
* @cpu: ARMCPU
* @is_aa64: True if the translation regime is in AArch64 state
- * @startlevel: Suggested starting level
- * @inputsize: Bitsize of IPAs
+ * @tcr: VTCR_EL2 or VSTCR_EL2
+ * @ds: Effective value of TCR.DS.
+ * @iasize: Bitsize of IPAs
* @stride: Page-table stride (See the ARM ARM)
*
- * Returns true if the suggested S2 translation parameters are OK and
- * false otherwise.
+ * Decode the starting level of the S2 lookup, returning INT_MIN if
+ * the configuration is invalid.
*/
-static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
- int inputsize, int stride, int outputsize)
+static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
+ bool ds, int iasize, int stride)
{
- const int grainsize = stride + 3;
- int startsizecheck;
-
- /*
- * Negative levels are usually not allowed...
- * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
- * begins with level -1. Note that previous feature tests will have
- * eliminated this combination if it is not enabled.
- */
- if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
- return false;
- }
-
- startsizecheck = inputsize - ((3 - level) * stride + grainsize);
- if (startsizecheck < 1 || startsizecheck > stride + 4) {
- return false;
- }
+ int sl0, sl2, startlevel, granulebits, levels;
+ int s1_min_iasize, s1_max_iasize;
+ sl0 = extract32(tcr, 6, 2);
if (is_aa64) {
+ /*
+ * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
+ * so interleave AArch64.S2StartLevel.
+ */
switch (stride) {
- case 13: /* 64KB Pages. */
- if (level == 0 || (level == 1 && outputsize <= 42)) {
- return false;
+ case 9: /* 4KB */
+ /* SL2 is RES0 unless DS=1 & 4KB granule. */
+ sl2 = extract64(tcr, 33, 1);
+ if (ds && sl2) {
+ if (sl0 != 0) {
+ goto fail;
+ }
+ startlevel = -1;
+ } else {
+ startlevel = 2 - sl0;
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 44) {
+ goto fail;
+ }
+ break;
+ case 3:
+ if (!cpu_isar_feature(aa64_st, cpu)) {
+ goto fail;
+ }
+ startlevel = 3;
+ break;
+ }
}
break;
- case 11: /* 16KB Pages. */
- if (level == 0 || (level == 1 && outputsize <= 40)) {
- return false;
+ case 11: /* 16KB */
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 42) {
+ goto fail;
+ }
+ break;
+ case 3:
+ if (!ds) {
+ goto fail;
+ }
+ break;
}
+ startlevel = 3 - sl0;
break;
- case 9: /* 4KB Pages. */
- if (level == 0 && outputsize <= 42) {
- return false;
+ case 13: /* 64KB */
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 44) {
+ goto fail;
+ }
+ break;
+ case 3:
+ goto fail;
}
+ startlevel = 3 - sl0;
break;
default:
g_assert_not_reached();
}
-
- /* Inputsize checks. */
- if (inputsize > outputsize &&
- (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
- /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
- return false;
- }
} else {
- /* AArch32 only supports 4KB pages. Assert on that. */
+ /*
+ * Things are simpler for AArch32 EL2, with only 4k pages.
+ * There is no separate S2InvalidSL function, but AArch32.S2Walk
+ * begins with walkparams.sl0 in {'1x'}.
+ */
assert(stride == 9);
-
- if (level == 0) {
- return false;
+ if (sl0 >= 2) {
+ goto fail;
}
+ startlevel = 2 - sl0;
}
- return true;
+
+ /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
+ levels = 3 - startlevel;
+ granulebits = stride + 3;
+
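+ /*
+ * The input address must supply at least 1 and at most stride + 4
+ * bits of index at the start level: stage 2 allows up to 16
+ * concatenated tables at the initial lookup level.
+ */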
+ s1_min_iasize = levels * stride + granulebits + 1;
+ s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
+
+ if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
+ return startlevel;
+ }
+
+ fail:
+ return INT_MIN;
}
/**
* @ptw: Current and next stage parameters for the walk.
* @address: virtual address to get physical address for
* @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
- * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
- * (so this is a stage 2 page table walk),
- * must be true if this is stage 2 of a stage 1+2
- * walk for an EL0 access. If @mmu_idx is anything else,
- * @s1_is_el0 is ignored.
* @result: set on translation success,
* @fi: set to fault info if the translation fails
*/
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t address,
- MMUAccessType access_type, bool s1_is_el0,
+ MMUAccessType access_type,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
- bool is_secure = ptw->in_secure;
- uint32_t level;
+ int32_t level;
ARMVAParameters param;
uint64_t ttbr;
hwaddr descaddr, indexmask, indexmask_grainsize;
int32_t stride;
int addrsize, inputsize, outputsize;
uint64_t tcr = regime_tcr(env, mmu_idx);
- int ap, ns, xn, pxn;
+ int ap, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
uint64_t descaddrmask;
bool aarch64 = arm_el_is_aa64(env, el);
uint64_t descriptor, new_descriptor;
- bool nstable;
+ ARMSecuritySpace out_space;
/* TODO: This code does not support shareability levels. */
if (aarch64) {
int ps;
param = aa64_va_parameters(env, address, mmu_idx,
- access_type != MMU_INST_FETCH);
+ access_type != MMU_INST_FETCH,
+ !arm_el_is_aa64(env, 1));
level = 0;
/*
ps = MIN(ps, param.ps);
assert(ps < ARRAY_SIZE(pamax_map));
outputsize = pamax_map[ps];
+
+ /*
+ * With LPA2, the effective output address (OA) size is at most 48 bits
+ * unless TCR.DS == 1.
+ */
+ if (!param.ds && param.gran != Gran64K) {
+ outputsize = MIN(outputsize, 48);
+ }
} else {
param = aa32_va_parameters(env, address, mmu_idx);
level = 1;
*/
level = 4 - (inputsize - 4) / stride;
} else {
- /*
- * For stage 2 translations the starting level is specified by the
- * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
- */
- uint32_t sl0 = extract32(tcr, 6, 2);
- uint32_t sl2 = extract64(tcr, 33, 1);
- uint32_t startlevel;
- bool ok;
-
- /* SL2 is RES0 unless DS=1 & 4kb granule. */
- if (param.ds && stride == 9 && sl2) {
- if (sl0 != 0) {
- level = 0;
- goto do_translation_fault;
- }
- startlevel = -1;
- } else if (!aarch64 || stride == 9) {
- /* AArch32 or 4KB pages */
- startlevel = 2 - sl0;
-
- if (cpu_isar_feature(aa64_st, cpu)) {
- startlevel &= 3;
- }
- } else {
- /* 16KB or 64KB pages */
- startlevel = 3 - sl0;
- }
-
- /* Check that the starting level is valid. */
- ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
- inputsize, stride, outputsize);
- if (!ok) {
+ int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
+ inputsize, stride);
+ if (startlevel == INT_MIN) {
+ level = 0;
goto do_translation_fault;
}
level = startlevel;
descaddrmask = MAKE_64BIT_MASK(0, 40);
}
descaddrmask &= ~indexmask_grainsize;
-
- /*
- * Secure accesses start with the page table in secure memory and
- * can be downgraded to non-secure at any step. Non-secure accesses
- * remain non-secure. We implement this by just ORing in the NSTable/NS
- * bits at each step.
- */
- tableattrs = is_secure ? 0 : (1 << 4);
+ tableattrs = 0;
next_level:
descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
- nstable = extract32(tableattrs, 4, 1);
- if (!nstable) {
+
+ /*
+ * Process the NSTable bit from the previous level. This changes
+ * the table address space and the output space from Secure to
+ * NonSecure. With RME, the EL3 translation regime does not change
+ * from Root to NonSecure.
+ */
+ if (ptw->in_space == ARMSS_Secure
+ && !regime_is_stage2(mmu_idx)
+ && extract32(tableattrs, 4, 1)) {
/*
* Stage2_S -> Stage2 or Phys_S -> Phys_NS
- * Assert that the non-secure idx are even, and relative order.
+ * Assert the relative order of the secure/non-secure indexes.
*/
- QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
- QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
- QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
- QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
- ptw->in_ptw_idx &= ~1;
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
+ ptw->in_ptw_idx += 1;
ptw->in_secure = false;
+ ptw->in_space = ARMSS_NonSecure;
}
+
if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
goto do_fault;
}
*/
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
if (!regime_is_stage2(mmu_idx)) {
- attrs |= nstable << 5; /* NS */
+ attrs |= !ptw->in_secure << 5; /* NS */
if (!param.hpd) {
attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
/*
}
ap = extract32(attrs, 6, 2);
+ out_space = ptw->in_space;
if (regime_is_stage2(mmu_idx)) {
- ns = mmu_idx == ARMMMUIdx_Stage2;
- xn = extract64(attrs, 53, 2);
- result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
+ /*
+ * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
+ * The bit remains ignored for other security states.
+ * R_YMCSL: Executing an insn fetched from non-Realm causes
+ * a stage2 permission fault.
+ */
+ if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
+ out_space = ARMSS_NonSecure;
+ result->f.prot = get_S2prot_noexecute(ap);
+ } else {
+ xn = extract64(attrs, 53, 2);
+ result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
+ }
} else {
- ns = extract32(attrs, 5, 1);
+ int nse, ns = extract32(attrs, 5, 1);
+ switch (out_space) {
+ case ARMSS_Root:
+ /*
+ * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
+ * R_XTYPW: NSE and NS together select the output pa space.
+ */
+ nse = extract32(attrs, 11, 1);
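+ /* The ARMSS_* encoding matches {NSE,NS}: Secure, NonSecure, Root, Realm. */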
+ out_space = (nse << 1) | ns;
+ if (out_space == ARMSS_Secure &&
+ !cpu_isar_feature(aa64_sel2, cpu)) {
+ out_space = ARMSS_NonSecure;
+ }
+ break;
+ case ARMSS_Secure:
+ if (ns) {
+ out_space = ARMSS_NonSecure;
+ }
+ break;
+ case ARMSS_Realm:
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
+ break;
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ /*
+ * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&0,
+ * NS changes the output to non-secure space.
+ */
+ if (ns) {
+ out_space = ARMSS_NonSecure;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case ARMSS_NonSecure:
+ /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
+ break;
+ default:
+ g_assert_not_reached();
+ }
xn = extract64(attrs, 54, 1);
pxn = extract64(attrs, 53, 1);
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+
+ /*
+ * Note that we modified ptw->in_space earlier for NSTable, but
+ * result->f.attrs retains a copy of the original security space.
+ */
+ result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
+ result->f.attrs.space, out_space);
}
if (!(result->f.prot & (1 << access_type))) {
}
}
- if (ns) {
- /*
- * The NS bit will (as required by the architecture) have no effect if
- * the CPU doesn't support TZ or this is a non-secure translation
- * regime, because the attribute will already be non-secure.
- */
- result->f.attrs.secure = false;
- }
-
- /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
- if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.guarded = extract64(attrs, 50, 1); /* GP */
- }
+ result->f.attrs.space = out_space;
+ result->f.attrs.secure = arm_space_is_secure(out_space);
if (regime_is_stage2(mmu_idx)) {
result->cacheattrs.is_s2_format = true;
assert(attrindx <= 7);
result->cacheattrs.is_s2_format = false;
result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+ if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+ result->f.guarded = extract64(attrs, 50, 1); /* GP */
+ }
}
/*
if (arm_feature(env, ARM_FEATURE_M)) {
return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
- } else {
- return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
+
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ return false;
+ }
+
+ return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
return !(result->f.prot & (1 << access_type));
}
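+/*
+ * Return the MPU region base (RBAR) register array for the regime:
+ * the EL2 HPRBAR array, or the banked EL1/M-profile RBAR array.
+ */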
+static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t secure)
+{
+ if (regime_el(env, mmu_idx) == 2) {
+ return env->pmsav8.hprbar;
+ } else {
+ return env->pmsav8.rbar[secure];
+ }
+}
+
+static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t secure)
+{
+ if (regime_el(env, mmu_idx) == 2) {
+ return env->pmsav8.hprlar;
+ } else {
+ return env->pmsav8.rlar[secure];
+ }
+}
+
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool secure, GetPhysAddrResult *result,
bool hit = false;
uint32_t addr_page_base = address & TARGET_PAGE_MASK;
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
+ int region_counter;
+
+ if (regime_el(env, mmu_idx) == 2) {
+ region_counter = cpu->pmsav8r_hdregion;
+ } else {
+ region_counter = cpu->pmsav7_dregion;
+ }
result->f.lg_page_size = TARGET_PAGE_BITS;
result->f.phys_addr = address;
*mregion = -1;
}
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ fi->stage2 = true;
+ }
+
/*
* Unlike the ARM ARM pseudocode, we don't need to check whether this
* was an exception vector read from the vector table (which is always
hit = true;
}
- for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ uint32_t bitmask;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ bitmask = 0x1f;
+ } else {
+ bitmask = 0x3f;
+ fi->level = 0;
+ }
+
+ for (n = region_counter - 1; n >= 0; n--) {
/* region search */
/*
- * Note that the base address is bits [31:5] from the register
- * with bits [4:0] all zeroes, but the limit address is bits
- * [31:5] from the register with bits [4:0] all ones.
+ * Note that the base address is bits [31:x] from the register
+ * with bits [x-1:0] all zeroes, but the limit address is bits
+ * [31:x] from the register with bits [x-1:0] all ones, where x
+ * is 5 for Cortex-M and 6 for Cortex-R.
*/
- uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
- uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
+ uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
+ uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;
- if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
+ if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
/* Region disabled */
continue;
}
* PMSAv7 where highest-numbered-region wins)
*/
fi->type = ARMFault_Permission;
- fi->level = 1;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ fi->level = 1;
+ }
return true;
}
}
if (!hit) {
- /* background fault */
- fi->type = ARMFault_Background;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ fi->type = ARMFault_Background;
+ } else {
+ fi->type = ARMFault_Permission;
+ }
return true;
}
/* hit using the background region */
get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
} else {
- uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
- uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+ uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
+ uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
+ uint32_t ap = extract32(matched_rbar, 1, 2);
+ uint32_t xn = extract32(matched_rbar, 0, 1);
bool pxn = false;
if (arm_feature(env, ARM_FEATURE_V8_1M)) {
- pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
+ pxn = extract32(matched_rlar, 4, 1);
}
if (m_is_system_region(env, address)) {
xn = 1;
}
- result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+ if (regime_el(env, mmu_idx) == 2) {
+ result->f.prot = simple_ap_to_rw_prot_is_user(ap,
+ mmu_idx != ARMMMUIdx_E2);
+ } else {
+ result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ uint8_t attrindx = extract32(matched_rlar, 1, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ uint8_t sh = extract32(matched_rlar, 3, 2);
+
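+ /*
+ * SCTLR.WXN makes writable regions execute-never; SCTLR.UWXN
+ * makes EL0-writable regions privileged-execute-never.
+ */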
+ if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
+ result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
+ xn = 0x1;
+ }
+
+ if ((regime_el(env, mmu_idx) == 1) &&
+ regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
+ pxn = 0x1;
+ }
+
+ result->cacheattrs.is_s2_format = false;
+ result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+ result->cacheattrs.shareability = sh;
+ }
+
if (result->f.prot && !xn && !(pxn && !is_user)) {
result->f.prot |= PAGE_EXEC;
}
- /*
- * We don't need to look the attribute up in the MAIR0/MAIR1
- * registers because that only tells us about cacheability.
- */
+
if (mregion) {
*mregion = matchregion;
}
}
fi->type = ARMFault_Permission;
- fi->level = 1;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ fi->level = 1;
+ }
return !(result->f.prot & (1 << access_type));
}
*/
if (sattrs.ns) {
result->f.attrs.secure = false;
+ result->f.attrs.space = ARMSS_NonSecure;
} else if (!secure) {
/*
* NS access to S memory must fault.
{
uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
- s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
+ if (s2.is_s2_format) {
+ s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
+ } else {
+ s2_mair_attrs = s2.attrs;
+ }
s1lo = extract32(s1.attrs, 0, 4);
s2lo = extract32(s2_mair_attrs, 0, 4);
*/
static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
+ assert(s2.is_s2_format && !s1.is_s2_format);
+
switch (s2.attrs) {
case 7:
/* Use stage 1 attributes */
ARMCacheAttrs ret;
bool tagged = false;
- assert(s2.is_s2_format && !s1.is_s2_format);
+ assert(!s1.is_s2_format);
ret.is_s2_format = false;
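+ /* The BTI guarded bit is a stage 1 attribute; copy it through. */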
+ ret.guarded = s1.guarded;
if (s1.attrs == 0xf0) {
tagged = true;
ARMMMUFaultInfo *fi)
{
uint8_t memattr = 0x00; /* Device nGnRnE */
- uint8_t shareability = 0; /* non-sharable */
+ uint8_t shareability = 0; /* non-shareable */
int r_el;
switch (mmu_idx) {
case ARMMMUIdx_Stage2:
case ARMMMUIdx_Stage2_S:
- case ARMMMUIdx_Phys_NS:
case ARMMMUIdx_Phys_S:
+ case ARMMMUIdx_Phys_NS:
+ case ARMMMUIdx_Phys_Root:
+ case ARMMMUIdx_Phys_Realm:
break;
default:
} else {
memattr = 0x44; /* Normal, NC, No */
}
- shareability = 2; /* outer sharable */
+ shareability = 2; /* outer shareable */
}
result->cacheattrs.is_s2_format = false;
break;
hwaddr ipa;
int s1_prot, s1_lgpgsz;
bool is_secure = ptw->in_secure;
- bool ret, ipa_secure, s2walk_secure;
+ bool ret, ipa_secure;
ARMCacheAttrs cacheattrs1;
- bool is_el0;
+ ARMSecuritySpace ipa_space;
uint64_t hcr;
- ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
- /* If S1 fails or S2 is disabled, return early. */
- if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
+ /* If S1 fails, return early. */
+ if (ret) {
return ret;
}
ipa = result->f.phys_addr;
ipa_secure = result->f.attrs.secure;
- if (is_secure) {
- /* Select TCR based on the NS bit from the S1 walk. */
- s2walk_secure = !(ipa_secure
- ? env->cp15.vstcr_el2 & VSTCR_SW
- : env->cp15.vtcr_el2 & VTCR_NSW);
- } else {
- assert(!ipa_secure);
- s2walk_secure = false;
- }
+ ipa_space = result->f.attrs.space;
- is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
- ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
- ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
- ptw->in_secure = s2walk_secure;
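+ /*
+ * Record whether stage 1 was an EL0 access now, before in_mmu_idx
+ * is overwritten with the stage 2 index.
+ */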
+ ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
+ ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+ ptw->in_secure = ipa_secure;
+ ptw->in_space = ipa_space;
+ ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
/*
* S1 is done, now do S2 translation.
cacheattrs1 = result->cacheattrs;
memset(result, 0, sizeof(*result));
- ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
+ ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
}
/*
- * Use the maximum of the S1 & S2 page size, so that invalidation
- * of pages > TARGET_PAGE_SIZE works correctly.
+ * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
+ * this means "don't put this in the TLB"; in this case, return a
+ * result with lg_page_size == 0 to achieve that. Otherwise,
+ * use the maximum of the S1 & S2 page size, so that invalidation
+ * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
+ * we know the combined result permissions etc only cover the minimum
+ * of the S1 and S2 page size, because we know that the common TLB code
+ * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
+ * and passing a larger page size value only affects invalidations.)
*/
- if (result->f.lg_page_size < s1_lgpgsz) {
+ if (result->f.lg_page_size < TARGET_PAGE_BITS ||
+ s1_lgpgsz < TARGET_PAGE_BITS) {
+ result->f.lg_page_size = 0;
+ } else if (result->f.lg_page_size < s1_lgpgsz) {
result->f.lg_page_size = s1_lgpgsz;
}
return false;
}
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
target_ulong address,
MMUAccessType access_type,
GetPhysAddrResult *result,
bool is_secure = ptw->in_secure;
ARMMMUIdx s1_mmu_idx;
+ /*
+ * The page table entries may downgrade Secure to NonSecure, but
+ * cannot upgrade a NonSecure translation regime's attributes
+ * to Secure or Realm.
+ */
+ result->f.attrs.secure = is_secure;
+ result->f.attrs.space = ptw->in_space;
+
switch (mmu_idx) {
case ARMMMUIdx_Phys_S:
case ARMMMUIdx_Phys_NS:
+ case ARMMMUIdx_Phys_Root:
+ case ARMMMUIdx_Phys_Realm:
/* Checking Phys early avoids special casing later vs regime_el. */
return get_phys_addr_disabled(env, address, access_type, mmu_idx,
is_secure, result, fi);
ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
break;
+ case ARMMMUIdx_Stage2:
+ case ARMMMUIdx_Stage2_S:
+ /*
+ * Second stage lookup uses physical for ptw; whether this is S or
+ * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
+ * the Secure EL1&0 regime.
+ */
+ ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
+ break;
+
case ARMMMUIdx_E10_0:
s1_mmu_idx = ARMMMUIdx_Stage1_E0;
goto do_twostage;
* Otherwise, a stage1+stage2 translation is just stage 1.
*/
ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
- if (arm_feature(env, ARM_FEATURE_EL2)) {
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
+ !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
return get_phys_addr_twostage(env, ptw, address, access_type,
result, fi);
}
/* fall through */
default:
- /* Single stage and second stage uses physical for ptw. */
- ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+ /* Single stage uses physical for ptw. */
+ ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
break;
}
- /*
- * The page table entries may downgrade secure to non-secure, but
- * cannot upgrade an non-secure translation regime's attributes
- * to secure.
- */
- result->f.attrs.secure = is_secure;
result->f.attrs.user = regime_is_user(env, mmu_idx);
/*
}
if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, ptw, address, access_type, false,
- result, fi);
+ return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
} else if (arm_feature(env, ARM_FEATURE_V7) ||
regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
}
}
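+/*
+ * Translate, then check the output physical address against the
+ * granule protection table. A GPC fault on the output address is
+ * reported as ARMFault_GPCFOnOutput; a fault during the walk itself
+ * is converted to ARMFault_GPCFOnWalk in S1_ptw_translate.
+ */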
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
+ target_ulong address,
+ MMUAccessType access_type,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
+{
+ if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+ return true;
+ }
+ if (!granule_protection_check(env, result->f.phys_addr,
+ result->f.attrs.space, fi)) {
+ fi->type = ARMFault_GPCFOnOutput;
+ return true;
+ }
+ return false;
+}
+
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool is_secure, GetPhysAddrResult *result,
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_secure = is_secure,
+ .in_space = arm_secure_to_space(is_secure),
};
- return get_phys_addr_with_struct(env, &ptw, address, access_type,
- result, fi);
+ return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}
bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
- bool is_secure;
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ };
+ ARMSecuritySpace ss;
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E2:
- is_secure = arm_is_secure_below_el3(env);
+ ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
+ /*
+ * For Secure EL2, we need this index to be NonSecure;
+ * otherwise this will already be NonSecure or Realm.
+ */
+ ss = arm_security_space_below_el3(env);
+ if (ss == ARMSS_Secure) {
+ ss = ARMSS_NonSecure;
+ }
+ break;
case ARMMMUIdx_Phys_NS:
case ARMMMUIdx_MPrivNegPri:
case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
case ARMMMUIdx_MUser:
- is_secure = false;
+ ss = ARMSS_NonSecure;
break;
- case ARMMMUIdx_E3:
case ARMMMUIdx_Stage2_S:
case ARMMMUIdx_Phys_S:
case ARMMMUIdx_MSPrivNegPri:
case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
case ARMMMUIdx_MSUser:
- is_secure = true;
+ ss = ARMSS_Secure;
+ break;
+ case ARMMMUIdx_E3:
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+ cpu_isar_feature(aa64_rme, env_archcpu(env))) {
+ ss = ARMSS_Root;
+ } else {
+ ss = ARMSS_Secure;
+ }
+ break;
+ case ARMMMUIdx_Phys_Root:
+ ss = ARMSS_Root;
+ break;
+ case ARMMMUIdx_Phys_Realm:
+ ss = ARMSS_Realm;
break;
default:
g_assert_not_reached();
}
- return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+
+ ptw.in_space = ss;
+ ptw.in_secure = arm_space_is_secure(ss);
+ return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMSecuritySpace ss = arm_security_space(env);
S1Translate ptw = {
- .in_mmu_idx = arm_mmu_idx(env),
- .in_secure = arm_is_secure(env),
+ .in_mmu_idx = mmu_idx,
+ .in_space = ss,
+ .in_secure = arm_space_is_secure(ss),
.in_debug = true,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
bool ret;
- ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+ ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
*attrs = res.f.attrs;
if (ret) {