X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=target%2Farm%2Fptw.c;h=1762b058aecfc0e61e58a50160d0e2bef92787f4;hb=05470c3979d5485003e129ff4b0c2ef98af91d86;hp=bd75da8dbcfb29f88599a876b89d36543c287348;hpb=29c343a463450f1aa15609014fb87a0472403d70;p=mirror_qemu.git diff --git a/target/arm/ptw.c b/target/arm/ptw.c index bd75da8dbc..1762b058ae 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -13,34 +13,76 @@ #include "exec/exec-all.h" #include "cpu.h" #include "internals.h" +#include "cpu-features.h" #include "idau.h" - +#ifdef CONFIG_TCG +# include "tcg/oversized-guest.h" +#endif typedef struct S1Translate { + /* + * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk. + * Together with in_space, specifies the architectural translation regime. + */ ARMMMUIdx in_mmu_idx; + /* + * in_ptw_idx: specifies which mmuidx to use for the actual + * page table descriptor load operations. This will be one of the + * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes. + * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, + * this field is updated accordingly. + */ ARMMMUIdx in_ptw_idx; - bool in_secure; + /* + * in_space: the security space for this walk. This plus + * the in_mmu_idx specify the architectural translation regime. + * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, + * this field is updated accordingly. + * + * Note that the security space for the in_ptw_idx may be different + * from that for the in_mmu_idx. We do not need to explicitly track + * the in_ptw_idx security space because: + * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx + * itself specifies the security space + * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security + * space used for ptw reads is the same as that of the security + * space of the stage 1 translation for all cases except where + * stage 1 is Secure; in that case the only possibilities for + * the ptw read are Secure and NonSecure, and the in_ptw_idx + * value being Stage2 vs Stage2_S distinguishes those. + */ + ARMSecuritySpace in_space; + /* + * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug + * accesses will not update the guest page table access flags + * and will not change the state of the softmmu TLBs. + */ bool in_debug; - bool out_secure; + /* + * If this is stage 2 of a stage 1+2 page table walk, then this must + * be true if stage 1 is an EL0 access; otherwise this is ignored. + * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}. + */ + bool in_s1_is_el0; bool out_rw; bool out_be; + ARMSecuritySpace out_space; hwaddr out_virt; hwaddr out_phys; void *out_host; } S1Translate; -static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, - uint64_t address, - MMUAccessType access_type, bool s1_is_el0, - GetPhysAddrResult *result, ARMMMUFaultInfo *fi) - __attribute__((nonnull)); +static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, + target_ulong address, + MMUAccessType access_type, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi); -static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, - target_ulong address, - MMUAccessType access_type, - GetPhysAddrResult *result, - ARMMMUFaultInfo *fi) - __attribute__((nonnull)); +static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, + target_ulong address, + MMUAccessType access_type, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi); /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. 
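 * As a reminder (an Arm ARM fact, not something this patch changes):
 * encodings 0..6 select 32, 36, 40, 42, 44, 48 and 52 bits of
 * physical address.  An illustrative decode, using the same
 * FIELD_EX64() idiom that granule_protection_check() below uses:
 *
 *     unsigned parange = FIELD_EX64(cpu->isar.id_aa64mmfr0,
 *                                   ID_AA64MMFR0, PARANGE);
 *     unsigned pamax = pamax_map[parange];
 *
 * (sketch only; a real caller must bound-check parange first, as
 * the GPC code below does).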
*/ static const uint8_t pamax_map[] = { @@ -103,6 +145,47 @@ ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) return stage_1_mmu_idx(arm_mmu_idx(env)); } +/* + * Return where we should do ptw loads from for a stage 2 walk. + * This depends on whether the address we are looking up is a + * Secure IPA or a NonSecure IPA, which we know from whether this is + * Stage2 or Stage2_S. + * If this is the Secure EL1&0 regime we need to check the NSW and SW bits. + */ +static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx) +{ + bool s2walk_secure; + + /* + * We're OK to check the current state of the CPU here because + * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit + * changes. + * (2) there's no way to do a lookup that cares about Stage 2 for a + * different security state to the current one for AArch64, and AArch32 + * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do + * an NS stage 1+2 lookup while the NS bit is 0.) + */ + if (!arm_el_is_aa64(env, 3)) { + return ARMMMUIdx_Phys_NS; + } + + switch (arm_security_space_below_el3(env)) { + case ARMSS_NonSecure: + return ARMMMUIdx_Phys_NS; + case ARMSS_Realm: + return ARMMMUIdx_Phys_Realm; + case ARMSS_Secure: + if (stage2idx == ARMMMUIdx_Stage2_S) { + s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); + } else { + s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); + } + return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; + default: + g_assert_not_reached(); + } +} + static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) { return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; @@ -126,11 +209,12 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) /* Return true if the specified stage of address translation is disabled */ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, - bool is_secure) + ARMSecuritySpace space) { uint64_t hcr_el2; if (arm_feature(env, ARM_FEATURE_M)) { + bool is_secure = arm_space_is_secure(space); switch (env->v7m.mpu_ctrl[is_secure] & (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { case R_V7M_MPU_CTRL_ENABLE_MASK: @@ -149,18 +233,19 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, } } - hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure); switch (mmu_idx) { case ARMMMUIdx_Stage2: case ARMMMUIdx_Stage2_S: /* HCR.DC means HCR.VM behaves as 1 */ + hcr_el2 = arm_hcr_el2_eff_secstate(env, space); return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */ + hcr_el2 = arm_hcr_el2_eff_secstate(env, space); if (hcr_el2 & HCR_TGE) { return true; } @@ -170,6 +255,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: /* HCR.DC means SCTLR_EL1.M behaves as 0 */ + hcr_el2 = arm_hcr_el2_eff_secstate(env, space); if (hcr_el2 & HCR_DC) { return true; } @@ -182,8 +268,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, case ARMMMUIdx_E3: break; - case ARMMMUIdx_Phys_NS: case ARMMMUIdx_Phys_S: + case ARMMMUIdx_Phys_NS: + case ARMMMUIdx_Phys_Root: + case ARMMMUIdx_Phys_Realm: /* No translation for physical address spaces. 
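 * The four Phys_* indexes correspond 1:1 to the four security
 * spaces, which is why later code in this patch can convert
 * between them with arm_space_to_phys()/arm_phys_to_space(), e.g.:
 *
 *     ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
 *
 * An address looked up through one of these indexes is already a
 * physical address in that space, so there is nothing to walk.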
*/ return true; @@ -194,6 +282,197 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; } +static bool granule_protection_check(CPUARMState *env, uint64_t paddress, + ARMSecuritySpace pspace, + ARMMMUFaultInfo *fi) +{ + MemTxAttrs attrs = { + .secure = true, + .space = ARMSS_Root, + }; + ARMCPU *cpu = env_archcpu(env); + uint64_t gpccr = env->cp15.gpccr_el3; + unsigned pps, pgs, l0gptsz, level = 0; + uint64_t tableaddr, pps_mask, align, entry, index; + AddressSpace *as; + MemTxResult result; + int gpi; + + if (!FIELD_EX64(gpccr, GPCCR, GPC)) { + return true; + } + + /* + * GPC Priority 1 (R_GMGRR): + * R_JWCSM: If the configuration of GPCCR_EL3 is invalid, + * the access fails as GPT walk fault at level 0. + */ + + /* + * Configuration of PPS to a value exceeding the implemented + * physical address size is invalid. + */ + pps = FIELD_EX64(gpccr, GPCCR, PPS); + if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) { + goto fault_walk; + } + pps = pamax_map[pps]; + pps_mask = MAKE_64BIT_MASK(0, pps); + + switch (FIELD_EX64(gpccr, GPCCR, SH)) { + case 0b10: /* outer shareable */ + break; + case 0b00: /* non-shareable */ + case 0b11: /* inner shareable */ + /* Inner and Outer non-cacheable requires Outer shareable. */ + if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 && + FIELD_EX64(gpccr, GPCCR, IRGN) == 0) { + goto fault_walk; + } + break; + default: /* reserved */ + goto fault_walk; + } + + switch (FIELD_EX64(gpccr, GPCCR, PGS)) { + case 0b00: /* 4KB */ + pgs = 12; + break; + case 0b01: /* 64KB */ + pgs = 16; + break; + case 0b10: /* 16KB */ + pgs = 14; + break; + default: /* reserved */ + goto fault_walk; + } + + /* Note this field is read-only and fixed at reset. */ + l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ); + + /* + * GPC Priority 2: Secure, Realm or Root address exceeds PPS. + * R_CPDSB: A NonSecure physical address input exceeding PPS + * does not experience any fault. + */ + if (paddress & ~pps_mask) { + if (pspace == ARMSS_NonSecure) { + return true; + } + goto fault_size; + } + + /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */ + tableaddr = env->cp15.gptbr_el3 << 12; + if (tableaddr & ~pps_mask) { + goto fault_size; + } + + /* + * BADDR is aligned per a function of PPS and L0GPTSZ. + * These bits of GPTBR_EL3 are RES0, but are not a configuration error, + * unlike the RES0 bits of the GPT entries (R_XNKFZ). + */ + align = MAX(pps - l0gptsz + 3, 12); + align = MAKE_64BIT_MASK(0, align); + tableaddr &= ~align; + + as = arm_addressspace(env_cpu(env), attrs); + + /* Level 0 lookup. 
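 * Worked example (illustrative): with PPS = 48 bits and the
 * minimum L0GPTSZ of 30 (1GB of PA covered per level 0 entry),
 * the index computed below is simply bits [47:30] of the address,
 * and the level 0 table holds 2^(48 - 30) = 256K entries of
 * 8 bytes each, i.e. 2MB.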
*/ + index = extract64(paddress, l0gptsz, pps - l0gptsz); + tableaddr += index * 8; + entry = address_space_ldq_le(as, tableaddr, attrs, &result); + if (result != MEMTX_OK) { + goto fault_eabt; + } + + switch (extract32(entry, 0, 4)) { + case 1: /* block descriptor */ + if (entry >> 8) { + goto fault_walk; /* RES0 bits not 0 */ + } + gpi = extract32(entry, 4, 4); + goto found; + case 3: /* table descriptor */ + tableaddr = entry & ~0xf; + align = MAX(l0gptsz - pgs - 1, 12); + align = MAKE_64BIT_MASK(0, align); + if (tableaddr & (~pps_mask | align)) { + goto fault_walk; /* RES0 bits not 0 */ + } + break; + default: /* invalid */ + goto fault_walk; + } + + /* Level 1 lookup */ + level = 1; + index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4); + tableaddr += index * 8; + entry = address_space_ldq_le(as, tableaddr, attrs, &result); + if (result != MEMTX_OK) { + goto fault_eabt; + } + + switch (extract32(entry, 0, 4)) { + case 1: /* contiguous descriptor */ + if (entry >> 10) { + goto fault_walk; /* RES0 bits not 0 */ + } + /* + * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE, + * and because we cannot invalidate by pa, and thus will always + * flush entire tlbs, we don't actually care about the range here + * and can simply extract the GPI as the result. + */ + if (extract32(entry, 8, 2) == 0) { + goto fault_walk; /* reserved contig */ + } + gpi = extract32(entry, 4, 4); + break; + default: + index = extract64(paddress, pgs, 4); + gpi = extract64(entry, index * 4, 4); + break; + } + + found: + switch (gpi) { + case 0b0000: /* no access */ + break; + case 0b1111: /* all access */ + return true; + case 0b1000: + case 0b1001: + case 0b1010: + case 0b1011: + if (pspace == (gpi & 3)) { + return true; + } + break; + default: + goto fault_walk; /* reserved */ + } + + fi->gpcf = GPCF_Fail; + goto fault_common; + fault_eabt: + fi->gpcf = GPCF_EABT; + goto fault_common; + fault_size: + fi->gpcf = GPCF_AddressSize; + goto fault_common; + fault_walk: + fi->gpcf = GPCF_Walk; + fault_common: + fi->level = level; + fi->paddr = paddress; + fi->paddr_space = pspace; + return false; +} + static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) { /* @@ -212,15 +491,53 @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) } } +static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space, + ARMMMUIdx s2_mmu_idx) +{ + /* + * Return the security space to use for stage 2 when doing + * the S1 page table descriptor load. + */ + if (regime_is_stage2(s2_mmu_idx)) { + /* + * The security space for ptw reads is almost always the same + * as that of the security space of the stage 1 translation. + * The only exception is when stage 1 is Secure; in that case + * the ptw read might be to the Secure or the NonSecure space + * (but never Realm or Root), and the s2_mmu_idx tells us which. + * Root translations are always single-stage. + */ + if (s1_space == ARMSS_Secure) { + return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S); + } else { + assert(s2_mmu_idx != ARMMMUIdx_Stage2_S); + assert(s1_space != ARMSS_Root); + return s1_space; + } + } else { + /* ptw loads are from phys: the mmu idx itself says which space */ + return arm_phys_to_space(s2_mmu_idx); + } +} + +static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx) +{ + /* + * For stage 2 faults in Secure EL22, S1NS indicates + * whether the faulting IPA is in the Secure or NonSecure + * IPA space. For all other kinds of fault, it is false. 
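 * (Architecturally this is the NS bit reported to the hypervisor
 * in HPFAR_EL2 alongside the faulting IPA; it only carries
 * information when the stage 2 fault was taken from Secure state,
 * which is why every other case folds to false here.)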
+ */ + return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx) + && s2_mmu_idx == ARMMMUIdx_Stage2_S; +} + /* Translate a S1 pagetable walk through S2 if needed. */ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, hwaddr addr, ARMMMUFaultInfo *fi) { - bool is_secure = ptw->in_secure; ARMMMUIdx mmu_idx = ptw->in_mmu_idx; ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; uint8_t pte_attrs; - bool pte_secure; ptw->out_virt = addr; @@ -229,39 +546,33 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, * From gdbstub, do not use softmmu so that we don't modify the * state of the cpu at all, including softmmu tlb contents. */ - if (regime_is_stage2(s2_mmu_idx)) { - S1Translate s2ptw = { - .in_mmu_idx = s2_mmu_idx, - .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS, - .in_secure = is_secure, - .in_debug = true, - }; - GetPhysAddrResult s2 = { }; - - if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD, - false, &s2, fi)) { - goto fail; - } - ptw->out_phys = s2.f.phys_addr; - pte_attrs = s2.cacheattrs.attrs; - pte_secure = s2.f.attrs.secure; - } else { - /* Regime is physical. */ - ptw->out_phys = addr; - pte_attrs = 0; - pte_secure = is_secure; + ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx); + S1Translate s2ptw = { + .in_mmu_idx = s2_mmu_idx, + .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), + .in_space = s2_space, + .in_debug = true, + }; + GetPhysAddrResult s2 = { }; + + if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) { + goto fail; } + + ptw->out_phys = s2.f.phys_addr; + pte_attrs = s2.cacheattrs.attrs; ptw->out_host = NULL; ptw->out_rw = false; + ptw->out_space = s2.f.attrs.space; } else { #ifdef CONFIG_TCG CPUTLBEntryFull *full; int flags; env->tlb_fi = fi; - flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD, - arm_to_core_mmu_idx(s2_mmu_idx), - true, &ptw->out_host, &full, 0); + flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD, + arm_to_core_mmu_idx(s2_mmu_idx), + &ptw->out_host, &full); env->tlb_fi = NULL; if (unlikely(flags & TLB_INVALID_MASK)) { @@ -269,15 +580,15 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, } ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); ptw->out_rw = full->prot & PAGE_WRITE; - pte_attrs = full->pte_attrs; - pte_secure = full->attrs.secure; + pte_attrs = full->extra.arm.pte_attrs; + ptw->out_space = full->attrs.space; #else g_assert_not_reached(); #endif } if (regime_is_stage2(s2_mmu_idx)) { - uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure); + uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) { /* @@ -288,25 +599,23 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, fi->s2addr = addr; fi->stage2 = true; fi->s1ptw = true; - fi->s1ns = !is_secure; + fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); return false; } } - /* Check if page table walk is to secure or non-secure PA space. */ - ptw->out_secure = (is_secure - && !(pte_secure - ? 
env->cp15.vstcr_el2 & VSTCR_SW - : env->cp15.vtcr_el2 & VTCR_NSW)); ptw->out_be = regime_translation_big_endian(env, mmu_idx); return true; fail: assert(fi->type != ARMFault_None); + if (fi->type == ARMFault_GPCFOnOutput) { + fi->type = ARMFault_GPCFOnWalk; + } fi->s2addr = addr; - fi->stage2 = true; - fi->s1ptw = true; - fi->s1ns = !is_secure; + fi->stage2 = regime_is_stage2(s2_mmu_idx); + fi->s1ptw = fi->stage2; + fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); return false; } @@ -328,7 +637,10 @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, } } else { /* Page tables are in MMIO. */ - MemTxAttrs attrs = { .secure = ptw->out_secure }; + MemTxAttrs attrs = { + .space = ptw->out_space, + .secure = arm_space_is_secure(ptw->out_space), + }; AddressSpace *as = arm_addressspace(cs, attrs); MemTxResult result = MEMTX_OK; @@ -371,7 +683,10 @@ static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, #endif } else { /* Page tables are in MMIO. */ - MemTxAttrs attrs = { .secure = ptw->out_secure }; + MemTxAttrs attrs = { + .space = ptw->out_space, + .secure = arm_space_is_secure(ptw->out_space), + }; AddressSpace *as = arm_addressspace(cs, attrs); MemTxResult result = MEMTX_OK; @@ -393,12 +708,12 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, uint64_t new_val, S1Translate *ptw, ARMMMUFaultInfo *fi) { +#if defined(TARGET_AARCH64) && defined(CONFIG_TCG) uint64_t cur_val; void *host = ptw->out_host; if (unlikely(!host)) { fi->type = ARMFault_UnsuppAtomicUpdate; - fi->s1ptw = true; return 0; } @@ -408,20 +723,26 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, */ if (unlikely(!ptw->out_rw)) { int flags; - void *discard; env->tlb_fi = fi; - flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE, - arm_to_core_mmu_idx(ptw->in_ptw_idx), - true, &discard, 0); + flags = probe_access_full_mmu(env, ptw->out_virt, 0, + MMU_DATA_STORE, + arm_to_core_mmu_idx(ptw->in_ptw_idx), + NULL, NULL); env->tlb_fi = NULL; if (unlikely(flags & TLB_INVALID_MASK)) { + /* + * We know this must be a stage 2 fault because the granule + * protection table does not separately track read and write + * permission, so all GPC faults are caught in S1_ptw_translate(): + * we only get here for "readable but not writeable". + */ assert(fi->type != ARMFault_None); fi->s2addr = ptw->out_virt; fi->stage2 = true; fi->s1ptw = true; - fi->s1ns = !ptw->in_secure; + fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx); return 0; } @@ -448,7 +769,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, * we know that TCG_OVERSIZED_GUEST is set, which means that we are * running in round-robin mode and could only race with dma i/o. */ -#ifndef TCG_OVERSIZED_GUEST +#if !TCG_OVERSIZED_GUEST # error "Unexpected configuration" #endif bool locked = qemu_mutex_iothread_locked(); @@ -472,6 +793,10 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, #endif return cur_val; +#else + /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */ + g_assert_not_reached(); +#endif } static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, @@ -877,6 +1202,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, * regime, because the attribute will already be non-secure. 
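 * With RME merged, the MemTxAttrs carry both the legacy .secure
 * flag and the new .space enum, and the two must be downgraded as
 * a pair -- conceptually:
 *
 *     attrs->space = ARMSS_NonSecure;
 *     attrs->secure = arm_space_is_secure(attrs->space);
 *
 * (a sketch of the invariant only; the hunk below simply writes
 * both fields out explicitly).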
*/ result->f.attrs.secure = false; + result->f.attrs.space = ARMSS_NonSecure; } result->f.phys_addr = phys_addr; return false; @@ -893,7 +1219,7 @@ do_fault: * @xn: XN (execute-never) bits * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 */ -static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) +static int get_S2prot_noexecute(int s2ap) { int prot = 0; @@ -903,6 +1229,12 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) if (s2ap & 2) { prot |= PAGE_WRITE; } + return prot; +} + +static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) +{ + int prot = get_S2prot_noexecute(s2ap); if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { switch (xn) { @@ -940,12 +1272,14 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) * @mmu_idx: MMU index indicating required translation regime * @is_aa64: TRUE if AArch64 * @ap: The 2-bit simple AP (AP[2:1]) - * @ns: NS (non-secure) bit * @xn: XN (execute-never) bit * @pxn: PXN (privileged execute-never) bit + * @in_pa: The original input pa space + * @out_pa: The output pa space, modified by NSTable, NS, and NSE */ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, - int ap, int ns, int xn, int pxn) + int ap, int xn, int pxn, + ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) { ARMCPU *cpu = env_archcpu(env); bool is_user = regime_is_user(env, mmu_idx); @@ -978,8 +1312,39 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, } } - if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { - return prot_rw; + if (in_pa != out_pa) { + switch (in_pa) { + case ARMSS_Root: + /* + * R_ZWRVD: permission fault for insn fetched from non-Root, + * I_WWBFB: SIF has no effect in EL3. + */ + return prot_rw; + case ARMSS_Realm: + /* + * R_PKTDS: permission fault for insn fetched from non-Realm, + * for Realm EL2 or EL2&0. The corresponding fault for EL1&0 + * happens during any stage2 translation. + */ + switch (mmu_idx) { + case ARMMMUIdx_E2: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return prot_rw; + default: + break; + } + break; + case ARMSS_Secure: + if (env->cp15.scr_el3 & SCR_SIF) { + return prot_rw; + } + break; + default: + /* Input NonSecure must have output NonSecure. */ + g_assert_not_reached(); + } } /* TODO have_wxn should be replaced with @@ -1109,17 +1474,6 @@ static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, sl0 = extract32(tcr, 6, 2); if (is_aa64) { - /* - * AArch64.S2InvalidTxSZ: While we checked tsz_oob near the top of - * get_phys_addr_lpae, that used aa64_va_parameters which apply - * to aarch64. If Stage1 is aarch32, the min_txsz is larger. - * See AArch64.S2MinTxSZ, where min_tsz is 24, translated to - * inputsize is 64 - 24 = 40. - */ - if (iasize < 40 && !arm_el_is_aa64(&cpu->env, 1)) { - goto fail; - } - /* * AArch64.S2InvalidSL: Interpretation of SL depends on the page size, * so interleave AArch64.S2StartLevel. @@ -1208,6 +1562,25 @@ static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, return INT_MIN; } +static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds, + ARMGranuleSize gran, int level) +{ + /* + * See pseudocode AArch46.BlockDescSupported(): block descriptors + * are not valid at all levels, depending on the page size. 
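 * Summary of the switch below (a restatement, not new behaviour):
 *
 *     granule   level 0        level 1           level 2
 *     4KB       only with DS   yes               yes
 *     16KB      no             only with DS      yes
 *     64KB      no             only with 52-bit  yes
 *                              PA (FEAT_LPA)
 *
 * where DS is the TCR_ELx.DS bit added by FEAT_LPA2.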
+ */ + switch (gran) { + case Gran4K: + return (level == 0 && ds) || level == 1 || level == 2; + case Gran16K: + return (level == 1 && ds) || level == 2; + case Gran64K: + return (level == 1 && arm_pamax(cpu) == 52) || level == 2; + default: + g_assert_not_reached(); + } +} + /** * get_phys_addr_lpae: perform one stage of page table walk, LPAE format * @@ -1221,22 +1594,16 @@ static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, * @ptw: Current and next stage parameters for the walk. * @address: virtual address to get physical address for * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH - * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2 - * (so this is a stage 2 page table walk), - * must be true if this is stage 2 of a stage 1+2 - * walk for an EL0 access. If @mmu_idx is anything else, - * @s1_is_el0 is ignored. * @result: set on translation success, * @fi: set to fault info if the translation fails */ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, uint64_t address, - MMUAccessType access_type, bool s1_is_el0, + MMUAccessType access_type, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { ARMCPU *cpu = env_archcpu(env); ARMMMUIdx mmu_idx = ptw->in_mmu_idx; - bool is_secure = ptw->in_secure; int32_t level; ARMVAParameters param; uint64_t ttbr; @@ -1247,19 +1614,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, int32_t stride; int addrsize, inputsize, outputsize; uint64_t tcr = regime_tcr(env, mmu_idx); - int ap, ns, xn, pxn; + int ap, xn, pxn; uint32_t el = regime_el(env, mmu_idx); uint64_t descaddrmask; bool aarch64 = arm_el_is_aa64(env, el); uint64_t descriptor, new_descriptor; - bool nstable; + ARMSecuritySpace out_space; /* TODO: This code does not support shareability levels. */ if (aarch64) { int ps; param = aa64_va_parameters(env, address, mmu_idx, - access_type != MMU_INST_FETCH); + access_type != MMU_INST_FETCH, + !arm_el_is_aa64(env, 1)); level = 0; /* @@ -1413,31 +1781,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, descaddrmask = MAKE_64BIT_MASK(0, 40); } descaddrmask &= ~indexmask_grainsize; - - /* - * Secure accesses start with the page table in secure memory and - * can be downgraded to non-secure at any step. Non-secure accesses - * remain non-secure. We implement this by just ORing in the NSTable/NS - * bits at each step. - */ - tableattrs = is_secure ? 0 : (1 << 4); + tableattrs = 0; next_level: descaddr |= (address >> (stride * (4 - level))) & indexmask; descaddr &= ~7ULL; - nstable = extract32(tableattrs, 4, 1); - if (nstable) { + + /* + * Process the NSTable bit from the previous level. This changes + * the table address space and the output space from Secure to + * NonSecure. With RME, the EL3 translation regime does not change + * from Root to NonSecure. + */ + if (ptw->in_space == ARMSS_Secure + && !regime_is_stage2(mmu_idx) + && extract32(tableattrs, 4, 1)) { /* * Stage2_S -> Stage2 or Phys_S -> Phys_NS - * Assert that the non-secure idx are even, and relative order. + * Assert the relative order of the secure/non-secure indexes. 
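 * The downgrade itself is a single increment because the mmu_idx
 * enum is laid out so that each Secure index immediately precedes
 * its NonSecure counterpart:
 *
 *     ARMMMUIdx_Phys_S   + 1 == ARMMMUIdx_Phys_NS
 *     ARMMMUIdx_Stage2_S + 1 == ARMMMUIdx_Stage2
 *
 * which is exactly what the build-time asserts below verify
 * before ptw->in_ptw_idx is bumped.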
*/ - QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0); - QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0); - QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S); - QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S); - ptw->in_ptw_idx &= ~1; - ptw->in_secure = false; + QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS); + QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2); + ptw->in_ptw_idx += 1; + ptw->in_space = ARMSS_NonSecure; } + if (!S1_ptw_translate(env, ptw, descaddr, fi)) { goto do_fault; } @@ -1448,8 +1816,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, new_descriptor = descriptor; restart_atomic_update: - if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) { - /* Invalid, or the Reserved level 3 encoding */ + if (!(descriptor & 1) || + (!(descriptor & 2) && + !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) { + /* Invalid, or a block descriptor at an invalid level */ goto do_translation_fault; } @@ -1536,11 +1906,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, * Extract attributes from the (modified) descriptor, and apply * table descriptors. Stage 2 table descriptors do not include * any attribute fields. HPD disables all the table attributes - * except NSTable. + * except NSTable (which we have already handled). */ attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14)); if (!regime_is_stage2(mmu_idx)) { - attrs |= nstable << 5; /* NS */ if (!param.hpd) { attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ /* @@ -1553,15 +1922,79 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, } ap = extract32(attrs, 6, 2); + out_space = ptw->in_space; if (regime_is_stage2(mmu_idx)) { - ns = mmu_idx == ARMMMUIdx_Stage2; - xn = extract64(attrs, 53, 2); - result->f.prot = get_S2prot(env, ap, xn, s1_is_el0); + /* + * R_GYNXY: For stage2 in Realm security state, bit 55 is NS. + * The bit remains ignored for other security states. + * R_YMCSL: Executing an insn fetched from non-Realm causes + * a stage2 permission fault. + */ + if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) { + out_space = ARMSS_NonSecure; + result->f.prot = get_S2prot_noexecute(ap); + } else { + xn = extract64(attrs, 53, 2); + result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); + } } else { - ns = extract32(attrs, 5, 1); + int nse, ns = extract32(attrs, 5, 1); + switch (out_space) { + case ARMSS_Root: + /* + * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime. + * R_XTYPW: NSE and NS together select the output pa space. + */ + nse = extract32(attrs, 11, 1); + out_space = (nse << 1) | ns; + if (out_space == ARMSS_Secure && + !cpu_isar_feature(aa64_sel2, cpu)) { + out_space = ARMSS_NonSecure; + } + break; + case ARMSS_Secure: + if (ns) { + out_space = ARMSS_NonSecure; + } + break; + case ARMSS_Realm: + switch (mmu_idx) { + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */ + break; + case ARMMMUIdx_E2: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + /* + * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1, + * NS changes the output to non-secure space. + */ + if (ns) { + out_space = ARMSS_NonSecure; + } + break; + default: + g_assert_not_reached(); + } + break; + case ARMSS_NonSecure: + /* R_QRMFF: For NonSecure state, the NS bit is RES0. 
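 * For the Root regime handled above, note that (NSE << 1) | NS
 * lands directly on the ARMSecuritySpace encoding:
 *
 *     NSE=0 NS=0 -> Secure       NSE=0 NS=1 -> NonSecure
 *     NSE=1 NS=0 -> Root         NSE=1 NS=1 -> Realm
 *
 * with a Secure result folded to NonSecure when FEAT_SEL2 is
 * absent, as the code above does.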
*/ + break; + default: + g_assert_not_reached(); + } xn = extract64(attrs, 54, 1); pxn = extract64(attrs, 53, 1); - result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); + + /* + * Note that we modified ptw->in_space earlier for NSTable, but + * result->f.attrs retains a copy of the original security space. + */ + result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn, + result->f.attrs.space, out_space); } if (!(result->f.prot & (1 << access_type))) { @@ -1588,14 +2021,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, } } - if (ns) { - /* - * The NS bit will (as required by the architecture) have no effect if - * the CPU doesn't support TZ or this is a non-secure translation - * regime, because the attribute will already be non-secure. - */ - result->f.attrs.secure = false; - } + result->f.attrs.space = out_space; + result->f.attrs.secure = arm_space_is_secure(out_space); if (regime_is_stage2(mmu_idx)) { result->cacheattrs.is_s2_format = true; @@ -1610,7 +2037,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { - result->f.guarded = extract64(attrs, 50, 1); /* GP */ + result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ } } @@ -1632,24 +2059,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, do_translation_fault: fi->type = ARMFault_Translation; do_fault: - fi->level = level; - /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ - fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx); - fi->s1ns = mmu_idx == ARMMMUIdx_Stage2; + if (fi->s1ptw) { + /* Retain the existing stage 2 fi->level */ + assert(fi->stage2); + } else { + fi->level = level; + fi->stage2 = regime_is_stage2(mmu_idx); + } + fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx); return true; } -static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool is_secure, GetPhysAddrResult *result, +static bool get_phys_addr_pmsav5(CPUARMState *env, + S1Translate *ptw, + uint32_t address, + MMUAccessType access_type, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { int n; uint32_t mask; uint32_t base; + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; bool is_user = regime_is_user(env, mmu_idx); - if (regime_translation_disabled(env, mmu_idx, is_secure)) { + if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { /* MPU disabled. 
*/ result->f.phys_addr = address; result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; @@ -1804,20 +2238,24 @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, return regime_sctlr(env, mmu_idx) & SCTLR_BR; } -static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool secure, GetPhysAddrResult *result, +static bool get_phys_addr_pmsav7(CPUARMState *env, + S1Translate *ptw, + uint32_t address, + MMUAccessType access_type, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { ARMCPU *cpu = env_archcpu(env); int n; + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; bool is_user = regime_is_user(env, mmu_idx); + bool secure = arm_space_is_secure(ptw->in_space); result->f.phys_addr = address; result->f.lg_page_size = TARGET_PAGE_BITS; result->f.prot = 0; - if (regime_translation_disabled(env, mmu_idx, secure) || + if (regime_translation_disabled(env, mmu_idx, ptw->in_space) || m_is_ppb_region(env, address)) { /* * MPU disabled or M profile PPB access: use default memory map. @@ -2061,7 +2499,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, * are done in arm_v7m_load_vector(), which always does a direct * read using address_space_ldl(), rather than going via this function. */ - if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */ + if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) { + /* MPU disabled */ hit = true; } else if (m_is_ppb_region(env, address)) { hit = true; @@ -2330,12 +2769,16 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address, } } -static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool secure, GetPhysAddrResult *result, +static bool get_phys_addr_pmsav8(CPUARMState *env, + S1Translate *ptw, + uint32_t address, + MMUAccessType access_type, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { V8M_SAttributes sattrs = {}; + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; + bool secure = arm_space_is_secure(ptw->in_space); bool ret; if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { @@ -2379,6 +2822,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, */ if (sattrs.ns) { result->f.attrs.secure = false; + result->f.attrs.space = ARMSS_NonSecure; } else if (!secure) { /* * NS access to S memory must fault. @@ -2588,7 +3032,6 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, assert(!s1.is_s2_format); ret.is_s2_format = false; - ret.guarded = s1.guarded; if (s1.attrs == 0xf0) { tagged = true; @@ -2638,21 +3081,25 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, * MMU disabled. S1 addresses within aa64 translation regimes are * still checked for bounds -- see AArch64.S1DisabledOutput(). 
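 * (Concretely: with the stage 1 MMU off an AArch64 access is
 * passed through untranslated, but an address with bits set above
 * the configured PA range may still take an Address Size fault;
 * only the attribute and shareability defaults are relaxed.)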
*/ -static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address, +static bool get_phys_addr_disabled(CPUARMState *env, + S1Translate *ptw, + target_ulong address, MMUAccessType access_type, - ARMMMUIdx mmu_idx, bool is_secure, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { + ARMMMUIdx mmu_idx = ptw->in_mmu_idx; uint8_t memattr = 0x00; /* Device nGnRnE */ - uint8_t shareability = 0; /* non-sharable */ + uint8_t shareability = 0; /* non-shareable */ int r_el; switch (mmu_idx) { case ARMMMUIdx_Stage2: case ARMMMUIdx_Stage2_S: - case ARMMMUIdx_Phys_NS: case ARMMMUIdx_Phys_S: + case ARMMMUIdx_Phys_NS: + case ARMMMUIdx_Phys_Root: + case ARMMMUIdx_Phys_Realm: break; default: @@ -2687,7 +3134,7 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address, /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ if (r_el == 1) { - uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure); + uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); if (hcr & HCR_DC) { if (hcr & HCR_DCT) { memattr = 0xf0; /* Tagged, Normal, WB, RWA */ @@ -2696,13 +3143,15 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address, } } } - if (memattr == 0 && access_type == MMU_INST_FETCH) { - if (regime_sctlr(env, mmu_idx) & SCTLR_I) { - memattr = 0xee; /* Normal, WT, RA, NT */ - } else { - memattr = 0x44; /* Normal, NC, No */ + if (memattr == 0) { + if (access_type == MMU_INST_FETCH) { + if (regime_sctlr(env, mmu_idx) & SCTLR_I) { + memattr = 0xee; /* Normal, WT, RA, NT */ + } else { + memattr = 0x44; /* Normal, NC, No */ + } } - shareability = 2; /* outer sharable */ + shareability = 2; /* outer shareable */ } result->cacheattrs.is_s2_format = false; break; @@ -2724,13 +3173,13 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, { hwaddr ipa; int s1_prot, s1_lgpgsz; - bool is_secure = ptw->in_secure; - bool ret, ipa_secure, s2walk_secure; + ARMSecuritySpace in_space = ptw->in_space; + bool ret, ipa_secure, s1_guarded; ARMCacheAttrs cacheattrs1; - bool is_el0; + ARMSecuritySpace ipa_space; uint64_t hcr; - ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi); + ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi); /* If S1 fails, return early. */ if (ret) { @@ -2739,20 +3188,12 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, ipa = result->f.phys_addr; ipa_secure = result->f.attrs.secure; - if (is_secure) { - /* Select TCR based on the NS bit from the S1 walk. */ - s2walk_secure = !(ipa_secure - ? env->cp15.vstcr_el2 & VSTCR_SW - : env->cp15.vtcr_el2 & VTCR_NSW); - } else { - assert(!ipa_secure); - s2walk_secure = false; - } + ipa_space = result->f.attrs.space; - is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; - ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; - ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; - ptw->in_secure = s2walk_secure; + ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; + ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; + ptw->in_space = ipa_space; + ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx); /* * S1 is done, now do S2 translation. 
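 * The handoff above re-points the same S1Translate at stage 2,
 * so the composition performed here is roughly:
 *
 *     stage 1:  VA  -> IPA   (already in result->f.phys_addr)
 *     stage 2:  IPA -> PA    (new in_mmu_idx, in_space, in_ptw_idx)
 *
 * and the get_phys_addr_nogpc() call below walks the stage 2
 * tables with the stage 1 output as its input address.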
@@ -2760,16 +3201,11 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, */ s1_prot = result->f.prot; s1_lgpgsz = result->f.lg_page_size; + s1_guarded = result->f.extra.arm.guarded; cacheattrs1 = result->cacheattrs; memset(result, 0, sizeof(*result)); - if (arm_feature(env, ARM_FEATURE_PMSA)) { - ret = get_phys_addr_pmsav8(env, ipa, access_type, - ptw->in_mmu_idx, is_secure, result, fi); - } else { - ret = get_phys_addr_lpae(env, ptw, ipa, access_type, - is_el0, result, fi); - } + ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi); fi->s2addr = ipa; /* Combine the S1 and S2 perms. */ @@ -2799,7 +3235,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, } /* Combine the S1 and S2 cache attributes. */ - hcr = arm_hcr_el2_eff_secstate(env, is_secure); + hcr = arm_hcr_el2_eff_secstate(env, in_space); if (hcr & HCR_DC) { /* * HCR.DC forces the first stage attributes to @@ -2816,48 +3252,69 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1, result->cacheattrs); + /* No BTI GP information in stage 2, we just use the S1 value */ + result->f.extra.arm.guarded = s1_guarded; + /* * Check if IPA translates to secure or non-secure PA space. * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. */ - result->f.attrs.secure = - (is_secure - && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) - && (ipa_secure - || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)))); + if (in_space == ARMSS_Secure) { + result->f.attrs.secure = + !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) + && (ipa_secure + || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))); + result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); + } return false; } -static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, +static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, target_ulong address, MMUAccessType access_type, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { ARMMMUIdx mmu_idx = ptw->in_mmu_idx; - bool is_secure = ptw->in_secure; ARMMMUIdx s1_mmu_idx; /* - * The page table entries may downgrade secure to non-secure, but - * cannot upgrade an non-secure translation regime's attributes - * to secure. + * The page table entries may downgrade Secure to NonSecure, but + * cannot upgrade a NonSecure translation regime's attributes + * to Secure or Realm. */ - result->f.attrs.secure = is_secure; + result->f.attrs.space = ptw->in_space; + result->f.attrs.secure = arm_space_is_secure(ptw->in_space); switch (mmu_idx) { case ARMMMUIdx_Phys_S: case ARMMMUIdx_Phys_NS: + case ARMMMUIdx_Phys_Root: + case ARMMMUIdx_Phys_Realm: /* Checking Phys early avoids special casing later vs regime_el. */ - return get_phys_addr_disabled(env, address, access_type, mmu_idx, - is_secure, result, fi); + return get_phys_addr_disabled(env, ptw, address, access_type, + result, fi); case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: - /* First stage lookup uses second stage for ptw. */ - ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; + /* + * First stage lookup uses second stage for ptw; only + * Secure has both S and NS IPA and starts with Stage2_S. + */ + ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ? 
+ ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; + break; + + case ARMMMUIdx_Stage2: + case ARMMMUIdx_Stage2_S: + /* + * Second stage lookup uses physical for ptw; whether this is S or + * NS may depend on the SW/NSW bits if this is a stage 2 lookup for + * the Secure EL2&0 regime. + */ + ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx); break; case ARMMMUIdx_E10_0: @@ -2876,15 +3333,15 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, */ ptw->in_mmu_idx = mmu_idx = s1_mmu_idx; if (arm_feature(env, ARM_FEATURE_EL2) && - !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) { + !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) { return get_phys_addr_twostage(env, ptw, address, access_type, result, fi); } /* fall through */ default: - /* Single stage and second stage uses physical for ptw. */ - ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; + /* Single stage uses physical for ptw. */ + ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space); break; } @@ -2909,16 +3366,16 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, if (arm_feature(env, ARM_FEATURE_V8)) { /* PMSAv8 */ - ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, - is_secure, result, fi); + ret = get_phys_addr_pmsav8(env, ptw, address, access_type, + result, fi); } else if (arm_feature(env, ARM_FEATURE_V7)) { /* PMSAv7 */ - ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, - is_secure, result, fi); + ret = get_phys_addr_pmsav7(env, ptw, address, access_type, + result, fi); } else { /* Pre-v7 MPU */ - ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, - is_secure, result, fi); + ret = get_phys_addr_pmsav5(env, ptw, address, access_type, + result, fi); } qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 " mmu_idx %u -> %s (prot %c%c%c)\n", @@ -2935,14 +3392,13 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, /* Definitely a real MMU, not an MPU */ - if (regime_translation_disabled(env, mmu_idx, is_secure)) { - return get_phys_addr_disabled(env, address, access_type, mmu_idx, - is_secure, result, fi); + if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { + return get_phys_addr_disabled(env, ptw, address, access_type, + result, fi); } if (regime_using_lpae_format(env, mmu_idx)) { - return get_phys_addr_lpae(env, ptw, address, access_type, false, - result, fi); + return get_phys_addr_lpae(env, ptw, address, access_type, result, fi); } else if (arm_feature(env, ARM_FEATURE_V7) || regime_sctlr(env, mmu_idx) & SCTLR_XP) { return get_phys_addr_v6(env, ptw, address, access_type, result, fi); @@ -2951,24 +3407,44 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw, } } -bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool is_secure, GetPhysAddrResult *result, - ARMMMUFaultInfo *fi) +static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, + target_ulong address, + MMUAccessType access_type, + GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) +{ + if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) { + return true; + } + if (!granule_protection_check(env, result->f.phys_addr, + result->f.attrs.space, fi)) { + fi->type = ARMFault_GPCFOnOutput; + return true; + } + return false; +} + +bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address, + MMUAccessType access_type, + ARMMMUIdx mmu_idx, ARMSecuritySpace space, + 
GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) { S1Translate ptw = { .in_mmu_idx = mmu_idx, - .in_secure = is_secure, + .in_space = space, }; - return get_phys_addr_with_struct(env, &ptw, address, access_type, - result, fi); + return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi); } bool get_phys_addr(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { - bool is_secure; + S1Translate ptw = { + .in_mmu_idx = mmu_idx, + }; + ARMSecuritySpace ss; switch (mmu_idx) { case ARMMMUIdx_E10_0: @@ -2981,30 +3457,53 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E2: - is_secure = arm_is_secure_below_el3(env); + ss = arm_security_space_below_el3(env); break; case ARMMMUIdx_Stage2: + /* + * For Secure EL2, we need this index to be NonSecure; + * otherwise this will already be NonSecure or Realm. + */ + ss = arm_security_space_below_el3(env); + if (ss == ARMSS_Secure) { + ss = ARMSS_NonSecure; + } + break; case ARMMMUIdx_Phys_NS: case ARMMMUIdx_MPrivNegPri: case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MPriv: case ARMMMUIdx_MUser: - is_secure = false; + ss = ARMSS_NonSecure; break; - case ARMMMUIdx_E3: case ARMMMUIdx_Stage2_S: case ARMMMUIdx_Phys_S: case ARMMMUIdx_MSPrivNegPri: case ARMMMUIdx_MSUserNegPri: case ARMMMUIdx_MSPriv: case ARMMMUIdx_MSUser: - is_secure = true; + ss = ARMSS_Secure; + break; + case ARMMMUIdx_E3: + if (arm_feature(env, ARM_FEATURE_AARCH64) && + cpu_isar_feature(aa64_rme, env_archcpu(env))) { + ss = ARMSS_Root; + } else { + ss = ARMSS_Secure; + } + break; + case ARMMMUIdx_Phys_Root: + ss = ARMSS_Root; + break; + case ARMMMUIdx_Phys_Realm: + ss = ARMSS_Realm; break; default: g_assert_not_reached(); } - return get_phys_addr_with_secure(env, address, access_type, mmu_idx, - is_secure, result, fi); + + ptw.in_space = ss; + return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi); } hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, @@ -3012,16 +3511,18 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + ARMSecuritySpace ss = arm_security_space(env); S1Translate ptw = { - .in_mmu_idx = arm_mmu_idx(env), - .in_secure = arm_is_secure(env), + .in_mmu_idx = mmu_idx, + .in_space = ss, .in_debug = true, }; GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; bool ret; - ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi); + ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi); *attrs = res.f.attrs; if (ret) {
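        /*
         * Usage sketch for this debug hook (a hypothetical caller,
         * assuming the generic get_phys_page_attrs_debug contract
         * where -1 means no valid mapping; not part of this patch):
         *
         *     MemTxAttrs attrs = {};
         *     hwaddr phys =
         *         arm_cpu_get_phys_page_attrs_debug(cs, addr, &attrs);
         *     if (phys != (hwaddr)-1) {
         *         // mapped: attrs.space/attrs.secure give the PA space
         *     }
         */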