#include "cpu.h"
#include "internals.h"
#include "idau.h"
-
+#ifdef CONFIG_TCG
+# include "tcg/oversized-guest.h"
+#endif
typedef struct S1Translate {
ARMMMUIdx in_mmu_idx;
ARMMMUIdx in_ptw_idx;
+ ARMSecuritySpace in_space;
bool in_secure;
bool in_debug;
bool out_secure;
bool out_rw;
bool out_be;
+ ARMSecuritySpace out_space;
hwaddr out_virt;
hwaddr out_phys;
void *out_host;
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t address,
MMUAccessType access_type, bool s1_is_el0,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
- __attribute__((nonnull));
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi);
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
target_ulong address,
MMUAccessType access_type,
GetPhysAddrResult *result,
- ARMMMUFaultInfo *fi)
- __attribute__((nonnull));
+ ARMMMUFaultInfo *fi);
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
return stage_1_mmu_idx(arm_mmu_idx(env));
}
+/*
+ * Return where we should do ptw loads from for a stage 2 walk.
+ * This depends on whether the address we are looking up is a
+ * Secure IPA or a NonSecure IPA, which we know from whether this is
+ * Stage2 or Stage2_S.
+ * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
+ */
+static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
+{
+ bool s2walk_secure;
+
+ /*
+ * We're OK to check the current state of the CPU here because
+ * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
+ * (2) there's no way to do a lookup that cares about Stage 2 for a
+ * different security state to the current one for AArch64, and AArch32
+ * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
+ * an NS stage 1+2 lookup while the NS bit is 0.)
+ */
+ if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
+ return ARMMMUIdx_Phys_NS;
+ }
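+    /*
+     * The decode below, in brief:
+     *   Stage2_S (Secure IPA):    walk in Secure PA space unless VSTCR_EL2.SW
+     *   Stage2   (NonSecure IPA): walk in Secure PA space unless VTCR_EL2.NSW
+     */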
+ if (stage2idx == ARMMMUIdx_Stage2_S) {
+ s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
+ } else {
+ s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
+ }
+    return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+}
+
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
case ARMMMUIdx_E3:
break;
- case ARMMMUIdx_Phys_NS:
case ARMMMUIdx_Phys_S:
+ case ARMMMUIdx_Phys_NS:
+ case ARMMMUIdx_Phys_Root:
+ case ARMMMUIdx_Phys_Realm:
/* No translation for physical address spaces. */
return true;
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
hwaddr addr, ARMMMUFaultInfo *fi)
{
+ ARMSecuritySpace space = ptw->in_space;
bool is_secure = ptw->in_secure;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
uint8_t pte_attrs;
- bool pte_secure;
ptw->out_virt = addr;
if (regime_is_stage2(s2_mmu_idx)) {
S1Translate s2ptw = {
.in_mmu_idx = s2_mmu_idx,
- .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS,
- .in_secure = is_secure,
+ .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
+ .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
+ .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
+ : space == ARMSS_Realm ? ARMSS_Realm
+ : ARMSS_NonSecure),
.in_debug = true,
};
GetPhysAddrResult s2 = { };
}
ptw->out_phys = s2.f.phys_addr;
pte_attrs = s2.cacheattrs.attrs;
- pte_secure = s2.f.attrs.secure;
+ ptw->out_secure = s2.f.attrs.secure;
+ ptw->out_space = s2.f.attrs.space;
} else {
/* Regime is physical. */
ptw->out_phys = addr;
pte_attrs = 0;
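+        /*
+         * The PA space of the walk follows the Phys_* index: Secure for
+         * Phys_S, a Realm walk stays in Realm, anything else is NonSecure.
+         */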
- pte_secure = is_secure;
+ ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S;
+ ptw->out_space = (s2_mmu_idx == ARMMMUIdx_Phys_S ? ARMSS_Secure
+ : space == ARMSS_Realm ? ARMSS_Realm
+ : ARMSS_NonSecure);
}
ptw->out_host = NULL;
ptw->out_rw = false;
int flags;
env->tlb_fi = fi;
- flags = probe_access_full(env, addr, MMU_DATA_LOAD,
+ flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
arm_to_core_mmu_idx(s2_mmu_idx),
true, &ptw->out_host, &full, 0);
env->tlb_fi = NULL;
ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
ptw->out_rw = full->prot & PAGE_WRITE;
pte_attrs = full->pte_attrs;
- pte_secure = full->attrs.secure;
+ ptw->out_secure = full->attrs.secure;
+ ptw->out_space = full->attrs.space;
#else
g_assert_not_reached();
#endif
}
}
- /* Check if page table walk is to secure or non-secure PA space. */
- ptw->out_secure = (is_secure
- && !(pte_secure
- ? env->cp15.vstcr_el2 & VSTCR_SW
- : env->cp15.vtcr_el2 & VTCR_NSW));
ptw->out_be = regime_translation_big_endian(env, mmu_idx);
return true;
}
} else {
/* Page tables are in MMIO. */
- MemTxAttrs attrs = { .secure = ptw->out_secure };
+ MemTxAttrs attrs = {
+ .secure = ptw->out_secure,
+ .space = ptw->out_space,
+ };
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
#endif
} else {
/* Page tables are in MMIO. */
- MemTxAttrs attrs = { .secure = ptw->out_secure };
+ MemTxAttrs attrs = {
+ .secure = ptw->out_secure,
+ .space = ptw->out_space,
+ };
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
+#ifdef TARGET_AARCH64
uint64_t cur_val;
void *host = ptw->out_host;
void *discard;
env->tlb_fi = fi;
- flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE,
+ flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
arm_to_core_mmu_idx(ptw->in_ptw_idx),
true, &discard, 0);
env->tlb_fi = NULL;
* we know that TCG_OVERSIZED_GUEST is set, which means that we are
* running in round-robin mode and could only race with dma i/o.
*/
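+    /*
+     * tcg/oversized-guest.h always defines TCG_OVERSIZED_GUEST (to 0 or 1),
+     * so we must test its value rather than whether it is defined.
+     */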
-#ifndef TCG_OVERSIZED_GUEST
+#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
bool locked = qemu_mutex_iothread_locked();
#endif
return cur_val;
+#else
+    /* AArch32 does not have FEAT_HAFDBS. */
+ g_assert_not_reached();
+#endif
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
* regime, because the attribute will already be non-secure.
*/
result->f.attrs.secure = false;
+ result->f.attrs.space = ARMSS_NonSecure;
}
result->f.phys_addr = phys_addr;
return false;
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
int ap, int ns, int xn, int pxn)
{
+ ARMCPU *cpu = env_archcpu(env);
bool is_user = regime_is_user(env, mmu_idx);
int prot_rw, user_rw;
bool have_wxn;
if (is_user) {
prot_rw = user_rw;
} else {
+ /*
+ * PAN controls can forbid data accesses but don't affect insn fetch.
+ * Plain PAN forbids data accesses if EL0 has data permissions;
+ * PAN3 forbids data accesses if EL0 has either data or exec perms.
+ * Note that for AArch64 the 'user can exec' case is exactly !xn.
+ * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
+ * do not affect EPAN.
+ */
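+        /*
+         * For example, a page with user_rw == 0 but XN clear (EL0 may
+         * execute it, not read it): plain PAN permits the privileged data
+         * access, but with SCTLR_ELx.EPAN set, PAN3 forbids it.
+         */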
if (user_rw && regime_is_pan(env, mmu_idx)) {
- /* PAN forbids data accesses but doesn't affect insn fetch */
+ prot_rw = 0;
+ } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
+ regime_is_pan(env, mmu_idx) &&
+ (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
prot_rw = 0;
} else {
prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
* check_s2_mmu_setup
* @cpu: ARMCPU
* @is_aa64: True if the translation regime is in AArch64 state
- * @startlevel: Suggested starting level
- * @inputsize: Bitsize of IPAs
+ * @tcr: VTCR_EL2 or VSTCR_EL2
+ * @ds: Effective value of TCR.DS.
+ * @iasize: Bitsize of IPAs
* @stride: Page-table stride (See the ARM ARM)
*
- * Returns true if the suggested S2 translation parameters are OK and
- * false otherwise.
+ * Decode the starting level of the S2 lookup, returning INT_MIN if
+ * the configuration is invalid.
*/
-static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
- int inputsize, int stride, int outputsize)
+static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
+ bool ds, int iasize, int stride)
{
- const int grainsize = stride + 3;
- int startsizecheck;
-
- /*
- * Negative levels are usually not allowed...
- * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
- * begins with level -1. Note that previous feature tests will have
- * eliminated this combination if it is not enabled.
- */
- if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
- return false;
- }
-
- startsizecheck = inputsize - ((3 - level) * stride + grainsize);
- if (startsizecheck < 1 || startsizecheck > stride + 4) {
- return false;
- }
+ int sl0, sl2, startlevel, granulebits, levels;
+ int s1_min_iasize, s1_max_iasize;
+ sl0 = extract32(tcr, 6, 2);
if (is_aa64) {
+ /*
+ * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
+ * so interleave AArch64.S2StartLevel.
+ */
switch (stride) {
- case 13: /* 64KB Pages. */
- if (level == 0 || (level == 1 && outputsize <= 42)) {
- return false;
+ case 9: /* 4KB */
+ /* SL2 is RES0 unless DS=1 & 4KB granule. */
+ sl2 = extract64(tcr, 33, 1);
+ if (ds && sl2) {
+ if (sl0 != 0) {
+ goto fail;
+ }
+ startlevel = -1;
+ } else {
+ startlevel = 2 - sl0;
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 44) {
+ goto fail;
+ }
+ break;
+ case 3:
+ if (!cpu_isar_feature(aa64_st, cpu)) {
+ goto fail;
+ }
+ startlevel = 3;
+ break;
+ }
}
break;
- case 11: /* 16KB Pages. */
- if (level == 0 || (level == 1 && outputsize <= 40)) {
- return false;
+ case 11: /* 16KB */
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 42) {
+ goto fail;
+ }
+ break;
+ case 3:
+ if (!ds) {
+ goto fail;
+ }
+ break;
}
+ startlevel = 3 - sl0;
break;
- case 9: /* 4KB Pages. */
- if (level == 0 && outputsize <= 42) {
- return false;
+ case 13: /* 64KB */
+ switch (sl0) {
+ case 2:
+ if (arm_pamax(cpu) < 44) {
+ goto fail;
+ }
+ break;
+ case 3:
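+            /* 64KB granules have no level 0 lookup: SL0 == 3 is invalid. */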
+ goto fail;
}
+ startlevel = 3 - sl0;
break;
default:
g_assert_not_reached();
}
-
- /* Inputsize checks. */
- if (inputsize > outputsize &&
- (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
- /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
- return false;
- }
} else {
- /* AArch32 only supports 4KB pages. Assert on that. */
+ /*
+ * Things are simpler for AArch32 EL2, with only 4k pages.
+ * There is no separate S2InvalidSL function, but AArch32.S2Walk
+ * begins with walkparms.sl0 in {'1x'}.
+ */
assert(stride == 9);
-
- if (level == 0) {
- return false;
+ if (sl0 >= 2) {
+ goto fail;
}
+ startlevel = 2 - sl0;
}
- return true;
+
+ /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
+ levels = 3 - startlevel;
+ granulebits = stride + 3;
+
+ s1_min_iasize = levels * stride + granulebits + 1;
+ s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
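+    /*
+     * For example, a 4KB granule (stride 9, granulebits 12) starting at
+     * level 1 gives levels = 2, so iasize must be within
+     * [2 * 9 + 12 + 1, 31 + 8 + 4] = [31, 43] for the lookup to be valid.
+     */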
+
+ if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
+ return startlevel;
+ }
+
+ fail:
+ return INT_MIN;
}
/**
int ps;
param = aa64_va_parameters(env, address, mmu_idx,
- access_type != MMU_INST_FETCH);
+ access_type != MMU_INST_FETCH,
+ !arm_el_is_aa64(env, 1));
level = 0;
/*
*/
level = 4 - (inputsize - 4) / stride;
} else {
- /*
- * For stage 2 translations the starting level is specified by the
- * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
- */
- uint32_t sl0 = extract32(tcr, 6, 2);
- uint32_t sl2 = extract64(tcr, 33, 1);
- int32_t startlevel;
- bool ok;
-
- /* SL2 is RES0 unless DS=1 & 4kb granule. */
- if (param.ds && stride == 9 && sl2) {
- if (sl0 != 0) {
- level = 0;
- goto do_translation_fault;
- }
- startlevel = -1;
- } else if (!aarch64 || stride == 9) {
- /* AArch32 or 4KB pages */
- startlevel = 2 - sl0;
-
- if (cpu_isar_feature(aa64_st, cpu)) {
- startlevel &= 3;
- }
- } else {
- /* 16KB or 64KB pages */
- startlevel = 3 - sl0;
- }
-
- /* Check that the starting level is valid. */
- ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
- inputsize, stride, outputsize);
- if (!ok) {
+ int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
+ inputsize, stride);
+ if (startlevel == INT_MIN) {
+ level = 0;
goto do_translation_fault;
}
level = startlevel;
descaddrmask &= ~indexmask_grainsize;
/*
- * Secure accesses start with the page table in secure memory and
+ * Secure stage 1 accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
* remain non-secure. We implement this by just ORing in the NSTable/NS
* bits at each step.
+ * Stage 2 never gets this kind of downgrade.
*/
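+    /* Bit 4 of tableattrs carries the accumulated NSTable state. */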
tableattrs = is_secure ? 0 : (1 << 4);
next_level:
descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
- nstable = extract32(tableattrs, 4, 1);
- if (nstable) {
+ nstable = !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1);
+ if (nstable && ptw->in_secure) {
/*
* Stage2_S -> Stage2 or Phys_S -> Phys_NS
- * Assert that the non-secure idx are even, and relative order.
+ * Assert the relative order of the secure/non-secure indexes.
*/
- QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
- QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
- QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
- QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
- ptw->in_ptw_idx &= ~1;
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
+ QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
+ ptw->in_ptw_idx += 1;
ptw->in_secure = false;
}
if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
* regime, because the attribute will already be non-secure.
*/
result->f.attrs.secure = false;
- }
-
- /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
- if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.guarded = extract64(attrs, 50, 1); /* GP */
+ result->f.attrs.space = ARMSS_NonSecure;
}
if (regime_is_stage2(mmu_idx)) {
assert(attrindx <= 7);
result->cacheattrs.is_s2_format = false;
result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+ if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+ result->f.guarded = extract64(attrs, 50, 1); /* GP */
+ }
}
/*
*/
if (sattrs.ns) {
result->f.attrs.secure = false;
+ result->f.attrs.space = ARMSS_NonSecure;
} else if (!secure) {
/*
* NS access to S memory must fault.
assert(!s1.is_s2_format);
ret.is_s2_format = false;
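+    /* GP is a stage 1 attribute; stage 2 descriptors have no GP bit. */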
+ ret.guarded = s1.guarded;
if (s1.attrs == 0xf0) {
tagged = true;
switch (mmu_idx) {
case ARMMMUIdx_Stage2:
case ARMMMUIdx_Stage2_S:
- case ARMMMUIdx_Phys_NS:
case ARMMMUIdx_Phys_S:
+ case ARMMMUIdx_Phys_NS:
+ case ARMMMUIdx_Phys_Root:
+ case ARMMMUIdx_Phys_Realm:
break;
default:
hwaddr ipa;
int s1_prot, s1_lgpgsz;
bool is_secure = ptw->in_secure;
- bool ret, ipa_secure, s2walk_secure;
+ bool ret, ipa_secure;
ARMCacheAttrs cacheattrs1;
+ ARMSecuritySpace ipa_space;
bool is_el0;
uint64_t hcr;
ipa = result->f.phys_addr;
ipa_secure = result->f.attrs.secure;
- if (is_secure) {
- /* Select TCR based on the NS bit from the S1 walk. */
- s2walk_secure = !(ipa_secure
- ? env->cp15.vstcr_el2 & VSTCR_SW
- : env->cp15.vtcr_el2 & VTCR_NSW);
- } else {
- assert(!ipa_secure);
- s2walk_secure = false;
- }
+ ipa_space = result->f.attrs.space;
is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
- ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
- ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
- ptw->in_secure = s2walk_secure;
+ ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+ ptw->in_secure = ipa_secure;
+ ptw->in_space = ipa_space;
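+    /*
+     * ptw_idx_for_stage_2() applies the VSTCR_EL2.SW / VTCR_EL2.NSW
+     * controls to pick the PA space used for the stage 2 walk itself.
+     */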
+ ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
/*
* S1 is done, now do S2 translation.
ARMMMUIdx s1_mmu_idx;
/*
- * The page table entries may downgrade secure to non-secure, but
- * cannot upgrade an non-secure translation regime's attributes
- * to secure.
+ * The page table entries may downgrade Secure to NonSecure, but
+ * cannot upgrade a NonSecure translation regime's attributes
+ * to Secure or Realm.
*/
result->f.attrs.secure = is_secure;
+ result->f.attrs.space = ptw->in_space;
switch (mmu_idx) {
case ARMMMUIdx_Phys_S:
case ARMMMUIdx_Phys_NS:
+ case ARMMMUIdx_Phys_Root:
+ case ARMMMUIdx_Phys_Realm:
/* Checking Phys early avoids special casing later vs regime_el. */
return get_phys_addr_disabled(env, address, access_type, mmu_idx,
is_secure, result, fi);
ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
break;
+ case ARMMMUIdx_Stage2:
+ case ARMMMUIdx_Stage2_S:
+ /*
+ * Second stage lookup uses physical for ptw; whether this is S or
+ * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
+     * the Secure EL1&0 regime.
+ */
+ ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
+ break;
+
case ARMMMUIdx_E10_0:
s1_mmu_idx = ARMMMUIdx_Stage1_E0;
goto do_twostage;
/* fall through */
default:
- /* Single stage and second stage uses physical for ptw. */
- ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+ /* Single stage uses physical for ptw. */
+ ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
break;
}
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_secure = is_secure,
+ .in_space = arm_secure_to_space(is_secure),
};
return get_phys_addr_with_struct(env, &ptw, address, access_type,
result, fi);
MMUAccessType access_type, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
- bool is_secure;
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ };
+ ARMSecuritySpace ss;
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E2:
- is_secure = arm_is_secure_below_el3(env);
+ ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
+ /*
+ * For Secure EL2, we need this index to be NonSecure;
+ * otherwise this will already be NonSecure or Realm.
+ */
+ ss = arm_security_space_below_el3(env);
+ if (ss == ARMSS_Secure) {
+ ss = ARMSS_NonSecure;
+ }
+ break;
case ARMMMUIdx_Phys_NS:
case ARMMMUIdx_MPrivNegPri:
case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
case ARMMMUIdx_MUser:
- is_secure = false;
+ ss = ARMSS_NonSecure;
break;
- case ARMMMUIdx_E3:
case ARMMMUIdx_Stage2_S:
case ARMMMUIdx_Phys_S:
case ARMMMUIdx_MSPrivNegPri:
case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
case ARMMMUIdx_MSUser:
- is_secure = true;
+ ss = ARMSS_Secure;
+ break;
+ case ARMMMUIdx_E3:
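+        /* With FEAT_RME, EL3 runs in the Root security space. */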
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+ cpu_isar_feature(aa64_rme, env_archcpu(env))) {
+ ss = ARMSS_Root;
+ } else {
+ ss = ARMSS_Secure;
+ }
+ break;
+ case ARMMMUIdx_Phys_Root:
+ ss = ARMSS_Root;
+ break;
+ case ARMMMUIdx_Phys_Realm:
+ ss = ARMSS_Realm;
break;
default:
g_assert_not_reached();
}
- return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
- is_secure, result, fi);
+
+ ptw.in_space = ss;
+ ptw.in_secure = arm_space_is_secure(ss);
+ return get_phys_addr_with_struct(env, &ptw, address, access_type,
+ result, fi);
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMSecuritySpace ss = arm_security_space(env);
S1Translate ptw = {
- .in_mmu_idx = arm_mmu_idx(env),
- .in_secure = arm_is_secure(env),
+ .in_mmu_idx = mmu_idx,
+ .in_space = ss,
+ .in_secure = arm_space_is_secure(ss),
.in_debug = true,
};
GetPhysAddrResult res = {};