/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "idau.h"
#ifdef CONFIG_TCG
# include "tcg/oversized-guest.h"
#endif
typedef struct S1Translate {
    /*
     * in_mmu_idx: specifies which TTBR, TCR, etc to use for the walk.
     * Together with in_space, specifies the architectural translation regime.
     */
    ARMMMUIdx in_mmu_idx;
    /*
     * in_ptw_idx: specifies which mmuidx to use for the actual
     * page table descriptor load operations. This will be one of the
     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     */
    ARMMMUIdx in_ptw_idx;
    /*
     * in_space: the security space for this walk. This plus
     * the in_mmu_idx specify the architectural translation regime.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     *
     * Note that the security space for the in_ptw_idx may be different
     * from that for the in_mmu_idx. We do not need to explicitly track
     * the in_ptw_idx security space because:
     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
     *    itself specifies the security space
     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
     *    space used for ptw reads is the same as that of the security
     *    space of the stage 1 translation for all cases except where
     *    stage 1 is Secure; in that case the only possibilities for
     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
     *    value being Stage2 vs Stage2_S distinguishes those.
     */
    ARMSecuritySpace in_space;
    /*
     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
     * accesses will not update the guest page table access flags
     * and will not change the state of the softmmu TLBs.
     */
    bool in_debug;
    /*
     * If this is stage 2 of a stage 1+2 page table walk, then this must
     * be true if stage 1 is an EL0 access; otherwise this is ignored.
     * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
     */
    bool in_s1_is_el0;
    bool out_rw;
    bool out_be;
    ARMSecuritySpace out_space;
    hwaddr out_virt;
    hwaddr out_phys;
    void *out_host;
} S1Translate;
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                target_ulong address,
                                MMUAccessType access_type,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi);

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              target_ulong address,
                              MMUAccessType access_type,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi);
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
/*
 * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
 * Note that machvirt_init calls this on a CPU that is inited but not realized!
 */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        /* v7 or v8 with LPAE */
        return 40;
    }

    /* Anything else */
    return 32;
}
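/*
 * Worked example of the mapping above: a CPU whose ID_AA64MMFR0.PARANGE
 * reads as 0b0101 implements 48-bit physical addresses, so arm_pamax()
 * returns pamax_map[5] == 48; PARANGE == 0b0110 (FEAT_LPA) gives 52.
 */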
/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
/*
 * Return where we should do ptw loads from for a stage 2 walk.
 * This depends on whether the address we are looking up is a
 * Secure IPA or a NonSecure IPA, which we know from whether this is
 * Stage2 or Stage2_S.
 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
 */
static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
{
    bool s2walk_secure;

    /*
     * We're OK to check the current state of the CPU here because
     * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
     * changes;
     * (2) there's no way to do a lookup that cares about Stage 2 for a
     * different security state to the current one for AArch64, and AArch32
     * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
     * an NS stage 1+2 lookup while the NS bit is 0.)
     */
    if (!arm_el_is_aa64(env, 3)) {
        return ARMMMUIdx_Phys_NS;
    }

    switch (arm_security_space_below_el3(env)) {
    case ARMSS_NonSecure:
        return ARMMMUIdx_Phys_NS;
    case ARMSS_Realm:
        return ARMMMUIdx_Phys_Realm;
    case ARMSS_Secure:
        if (stage2idx == ARMMMUIdx_Stage2_S) {
            s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
        } else {
            s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
        }
        return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
    default:
        g_assert_not_reached();
    }
}
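/*
 * For example, in the Secure EL1&0 regime with VTCR_EL2.NSW == 0, stage 2
 * walks of the NonSecure IPA space load their descriptors from the Secure
 * PA space (s2walk_secure above is true); setting NSW moves those loads
 * to the NonSecure PA space. VSTCR_EL2.SW does the same for Secure IPAs.
 */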
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        ARMSecuritySpace space)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        bool is_secure = arm_space_is_secure(space);
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
        break;

    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
                                     ARMSecuritySpace pspace,
                                     ARMMMUFaultInfo *fi)
{
    MemTxAttrs attrs = {
        .secure = true,
        .space = ARMSS_Root,
    };
    ARMCPU *cpu = env_archcpu(env);
    uint64_t gpccr = env->cp15.gpccr_el3;
    unsigned pps, pgs, l0gptsz, level = 0;
    uint64_t tableaddr, pps_mask, align, entry, index;
    AddressSpace *as;
    MemTxResult result;
    int gpi;

    if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
        return true;
    }

    /*
     * GPC Priority 1 (R_GMGRR):
     * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
     * the access fails as GPT walk fault at level 0.
     */

    /*
     * Configuration of PPS to a value exceeding the implemented
     * physical address size is invalid.
     */
    pps = FIELD_EX64(gpccr, GPCCR, PPS);
    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
        goto fault_walk;
    }
    pps = pamax_map[pps];
    pps_mask = MAKE_64BIT_MASK(0, pps);

    switch (FIELD_EX64(gpccr, GPCCR, SH)) {
    case 0b10: /* outer shareable */
        break;
    case 0b00: /* non-shareable */
    case 0b11: /* inner shareable */
        /* Inner and Outer non-cacheable requires Outer shareable. */
        if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
            FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
            goto fault_walk;
        }
        break;
    default: /* reserved */
        goto fault_walk;
    }

    switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
    case 0b00: /* 4KB */
        pgs = 12;
        break;
    case 0b01: /* 64KB */
        pgs = 16;
        break;
    case 0b10: /* 16KB */
        pgs = 14;
        break;
    default: /* reserved */
        goto fault_walk;
    }

    /* Note this field is read-only and fixed at reset. */
    l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);

    /*
     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
     * R_CPDSB: A NonSecure physical address input exceeding PPS
     * does not experience any fault.
     */
    if (paddress & ~pps_mask) {
        if (pspace == ARMSS_NonSecure) {
            return true;
        }
        goto fault_size;
    }

    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
    tableaddr = env->cp15.gptbr_el3 << 12;
    if (tableaddr & ~pps_mask) {
        goto fault_size;
    }

    /*
     * BADDR is aligned per a function of PPS and L0GPTSZ.
     * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
     * unlike the RES0 bits of the GPT entries (R_XNKFZ).
     */
    align = MAX(pps - l0gptsz + 3, 12);
    align = MAKE_64BIT_MASK(0, align);
    tableaddr &= ~align;

    as = arm_addressspace(env_cpu(env), attrs);

    /* Level 0 lookup. */
    index = extract64(paddress, l0gptsz, pps - l0gptsz);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* block descriptor */
        if (entry >> 8) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        gpi = extract32(entry, 4, 4);
        goto found;
    case 3: /* table descriptor */
        tableaddr = entry & ~0xf;
        align = MAX(l0gptsz - pgs - 1, 12);
        align = MAKE_64BIT_MASK(0, align);
        if (tableaddr & (~pps_mask | align)) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        break;
    default: /* invalid */
        goto fault_walk;
    }

    /* Level 1 lookup */
    level = 1;
    index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* contiguous descriptor */
        if (entry >> 10) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        /*
         * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
         * and because we cannot invalidate by pa, and thus will always
         * flush entire tlbs, we don't actually care about the range here
         * and can simply extract the GPI as the result.
         */
        if (extract32(entry, 8, 2) == 0) {
            goto fault_walk; /* reserved contig */
        }
        gpi = extract32(entry, 4, 4);
        break;
    default:
        /* granules */
        index = extract64(paddress, pgs, 4);
        gpi = extract64(entry, index * 4, 4);
        break;
    }

 found:
    switch (gpi) {
    case 0b0000: /* no access */
        break;
    case 0b1111: /* all access */
        return true;
    case 0b1000:
    case 0b1001:
    case 0b1010:
    case 0b1011:
        if (pspace == (gpi & 3)) {
            return true;
        }
        break;
    default:
        goto fault_walk; /* reserved */
    }

    fi->gpcf = GPCF_Fail;
    goto fault_common;
 fault_eabt:
    fi->gpcf = GPCF_EABT;
    goto fault_common;
 fault_size:
    fi->gpcf = GPCF_AddressSize;
    goto fault_common;
 fault_walk:
    fi->gpcf = GPCF_Walk;
 fault_common:
    fi->level = level;
    fi->paddr = paddress;
    fi->paddr_space = pspace;
    return false;
}
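/*
 * A worked example of the GPT index arithmetic above: with PGS = 4KB
 * (pgs == 12) and L0GPTSZ == 1GB (l0gptsz == 30), the level 0 index is
 * paddress[pps-1:30] and the level 1 index is paddress[29:16]. Each
 * 64-bit granules descriptor then packs sixteen 4-bit GPIs, one per 4KB
 * granule, with paddress[15:12] selecting the nibble.
 */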
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    if (hcr & HCR_FWB) {
        return (attrs & 0x4) == 0;
    } else {
        return (attrs & 0xc) == 0;
    }
}
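/*
 * For example, with FWB clear a stage 2 attrs value of 0b0001
 * (Device-nGnRE) has bits [3:2] == 0b00 and so is reported as Device,
 * while 0b1111 (Normal Writeback cacheable) is not.
 */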
static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
                                          ARMMMUIdx s2_mmu_idx)
{
    /*
     * Return the security space to use for stage 2 when doing
     * the S1 page table descriptor load.
     */
    if (regime_is_stage2(s2_mmu_idx)) {
        /*
         * The security space for ptw reads is almost always the same
         * as that of the security space of the stage 1 translation.
         * The only exception is when stage 1 is Secure; in that case
         * the ptw read might be to the Secure or the NonSecure space
         * (but never Realm or Root), and the s2_mmu_idx tells us which.
         * Root translations are always single-stage.
         */
        if (s1_space == ARMSS_Secure) {
            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
        } else {
            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
            assert(s1_space != ARMSS_Root);
            return s1_space;
        }
    } else {
        /* ptw loads are from phys: the mmu idx itself says which space */
        return arm_phys_to_space(s2_mmu_idx);
    }
}
static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
{
    /*
     * For stage 2 faults in Secure EL2, S1NS indicates
     * whether the faulting IPA is in the Secure or NonSecure
     * IPA space. For all other kinds of fault, it is false.
     */
    return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
        && s2_mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Translate a S1 pagetable walk through S2 if needed.  */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
        S1Translate s2ptw = {
            .in_mmu_idx = s2_mmu_idx,
            .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
            .in_space = s2_space,
            .in_debug = true,
        };
        GetPhysAddrResult s2 = { };

        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
            goto fail;
        }

        ptw->out_phys = s2.f.phys_addr;
        pte_attrs = s2.cacheattrs.attrs;
        ptw->out_host = NULL;
        ptw->out_rw = false;
        ptw->out_space = s2.f.attrs.space;
    } else {
#ifdef CONFIG_TCG
        CPUTLBEntryFull *full;
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
                                      arm_to_core_mmu_idx(s2_mmu_idx),
                                      &ptw->out_host, &full);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->extra.arm.pte_attrs;
        ptw->out_space = full->attrs.space;
#else
        g_assert_not_reached();
#endif
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
            return false;
        }
    }

    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    if (fi->type == ARMFault_GPCFOnOutput) {
        fi->type = ARMFault_GPCFOnWalk;
    }
    fi->s2addr = addr;
    fi->stage2 = regime_is_stage2(s2_mmu_idx);
    fi->s1ptw = fi->stage2;
    fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
    return false;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint32_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
        data = qatomic_read((uint32_t *)host);
        if (ptw->out_be) {
            data = be32_to_cpu(data);
        } else {
            data = le32_to_cpu(data);
        }
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint64_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
#ifdef CONFIG_ATOMIC64
        data = qatomic_read__nocheck((uint64_t *)host);
        if (ptw->out_be) {
            data = be64_to_cpu(data);
        } else {
            data = le64_to_cpu(data);
        }
#else
        if (ptw->out_be) {
            data = ldq_be_p(host);
        } else {
            data = ldq_le_p(host);
        }
#endif
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                             uint64_t new_val, S1Translate *ptw,
                             ARMMMUFaultInfo *fi)
{
#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
    uint64_t cur_val;
    void *host = ptw->out_host;

    if (unlikely(!host)) {
        fi->type = ARMFault_UnsuppAtomicUpdate;
        return 0;
    }

    /*
     * Raising a stage2 Protection fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, ptw->out_virt, 0,
                                      MMU_DATA_STORE,
                                      arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                      NULL, NULL);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            /*
             * We know this must be a stage 2 fault because the granule
             * protection table does not separately track read and write
             * permission, so all GPC faults are caught in S1_ptw_translate():
             * we only get here for "readable but not writeable".
             */
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    bool locked = bql_locked();
    if (!locked) {
        bql_lock();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        bql_unlock();
    }
#endif

    return cur_val;
#else
    /* AArch32 does not have FEAT_HAFDBS; non-TCG guests only use debug-mode. */
    g_assert_not_reached();
#endif
}
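/*
 * A return value equal to the descriptor the caller started from means
 * the compare-and-swap landed; anything else is the changed in-memory
 * value, and the caller restarts the walk from it (see the
 * restart_atomic_update path in get_phys_addr_lpae() below).
 */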
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
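/*
 * For example, with TTBCR.N == 2 (maskshift == 2), mask is 0xc0000000:
 * VAs below 0x40000000 walk from TTBR0, whose level 1 table shrinks to
 * 4KB (base_mask == ~0xfff), while all other VAs walk from the full
 * 16KB table at TTBR1.
 */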
/*
 * Translate section/page access permissions to page R/W protection flags
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 * @is_user:     TRUE if accessing from PL0
 */
static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
                                 int ap, int domain_prot, bool is_user)
{
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/*
 * Translate section/page access permissions to page R/W protection flags
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
                                 regime_is_user(env, mmu_idx));
}
/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
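/*
 * In other words, the simplified AP[2:1] encoding decodes as:
 *   0b00 -> privileged read/write only
 *   0b01 -> read/write at any privilege level
 *   0b10 -> privileged read-only
 *   0b11 -> read-only at any privilege level
 */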
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;
    int user_prot;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24; /* 16MB */
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20; /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
        if (regime_is_pan(env, mmu_idx) &&
            !regime_is_user(env, mmu_idx) &&
            user_prot &&
            access_type != MMU_INST_FETCH) {
            /* Privileged Access Never fault */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
        result->f.attrs.space = ARMSS_NonSecure;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * Translate S2 section/page access permissions to protection flags
 * @env:       CPUARMState
 * @s2ap:      The 2-bit stage2 access permissions (S2AP)
 * @xn:        XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot_noexecute(int s2ap)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    return prot;
}

static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = get_S2prot_noexecute(s2ap);

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
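/*
 * S2AP[0] grants read and S2AP[1] grants write, so e.g. s2ap == 0b01 is
 * read-only and s2ap == 0b11 is read/write; execute permission is then
 * layered on from the XN bits above.
 */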
/*
 * Translate section/page access permissions to protection flags
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 * @in_pa:   The original input pa space
 * @out_pa:  The output pa space, modified by NSTable, NS, and NSE
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int xn, int pxn,
                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(!regime_is_stage2(mmu_idx));

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        /*
         * PAN controls can forbid data accesses but don't affect insn fetch.
         * Plain PAN forbids data accesses if EL0 has data permissions;
         * PAN3 forbids data accesses if EL0 has either data or exec perms.
         * Note that for AArch64 the 'user can exec' case is exactly !xn.
         * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
         * do not affect EPAN.
         */
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            prot_rw = 0;
        } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
                   regime_is_pan(env, mmu_idx) &&
                   (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (in_pa != out_pa) {
        switch (in_pa) {
        case ARMSS_Root:
            /*
             * R_ZWRVD: permission fault for insn fetched from non-Root,
             * I_WWBFB: SIF has no effect in EL3.
             */
            return prot_rw;
        case ARMSS_Realm:
            /*
             * R_PKTDS: permission fault for insn fetched from non-Realm,
             * for Realm EL2 or EL2&0.  The corresponding fault for EL1&0
             * happens during any stage2 translation.
             */
            switch (mmu_idx) {
            case ARMMMUIdx_E2:
            case ARMMMUIdx_E20_0:
            case ARMMMUIdx_E20_2:
            case ARMMMUIdx_E20_2_PAN:
                return prot_rw;
            default:
                break;
            }
            break;
        case ARMSS_Secure:
            if (env->cp15.scr_el3 & SCR_SIF) {
                return prot_rw;
            }
            break;
        default:
            /* Input NonSecure must have output NonSecure. */
            g_assert_not_reached();
        }
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
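/*
 * The final test above implements [U]WXN: for example, with SCTLR.WXN
 * set, any page that is writable in the current regime is treated as
 * execute-never, regardless of its XN/PXN descriptor bits.
 */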
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later. */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well. */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @tcr:        VTCR_EL2 or VSTCR_EL2
 * @ds:         Effective value of TCR.DS.
 * @iasize:     Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Decode the starting level of the S2 lookup, returning INT_MIN if
 * the configuration is invalid.
 */
static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
                              bool ds, int iasize, int stride)
{
    int sl0, sl2, startlevel, granulebits, levels;
    int s1_min_iasize, s1_max_iasize;

    sl0 = extract32(tcr, 6, 2);
    if (is_aa64) {
        /*
         * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
         * so interleave AArch64.S2StartLevel.
         */
        switch (stride) {
        case 9: /* 4KB */
            /* SL2 is RES0 unless DS=1 & 4KB granule. */
            sl2 = extract64(tcr, 33, 1);
            if (ds && sl2) {
                if (sl0 != 0) {
                    goto fail;
                }
                startlevel = -1;
            } else {
                startlevel = 2 - sl0;
                switch (sl0) {
                case 2:
                    if (arm_pamax(cpu) < 44) {
                        goto fail;
                    }
                    break;
                case 3:
                    if (!cpu_isar_feature(aa64_st, cpu)) {
                        goto fail;
                    }
                    startlevel = 3;
                    break;
                }
            }
            break;
        case 11: /* 16KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 42) {
                    goto fail;
                }
                break;
            case 3:
                if (!ds) {
                    goto fail;
                }
                break;
            }
            startlevel = 3 - sl0;
            break;
        case 13: /* 64KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 44) {
                    goto fail;
                }
                break;
            case 3:
                goto fail;
            }
            startlevel = 3 - sl0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /*
         * Things are simpler for AArch32 EL2, with only 4k pages.
         * There is no separate S2InvalidSL function, but AArch32.S2Walk
         * begins with walkparms.sl0 in {'1x'}.
         */
        assert(stride == 9);
        if (sl0 >= 2) {
            goto fail;
        }
        startlevel = 2 - sl0;
    }

    /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
    levels = 3 - startlevel;
    granulebits = stride + 3;

    s1_min_iasize = levels * stride + granulebits + 1;
    s1_max_iasize = s1_min_iasize + (stride - 1) + 4;

    if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
        return startlevel;
    }

 fail:
    return INT_MIN;
}
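/*
 * A worked example: for a 4KB granule (stride == 9, granulebits == 12)
 * with SL0 == 1, startlevel is 1 and levels is 2, so the IPA size must
 * lie in [2 * 9 + 12 + 1, 31 + 8 + 4] = [31, 43] bits; a 40-bit IPA is
 * accepted, while a 48-bit IPA would instead need SL0 == 2 (startlevel 0).
 */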
static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
                                  ARMGranuleSize gran, int level)
{
    /*
     * See pseudocode AArch64.BlockDescSupported(): block descriptors
     * are not valid at all levels, depending on the page size.
     */
    switch (gran) {
    case Gran4K:
        return (level == 0 && ds) || level == 1 || level == 2;
    case Gran16K:
        return (level == 1 && ds) || level == 2;
    case Gran64K:
        return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
    default:
        g_assert_not_reached();
    }
}
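/*
 * This matches the architected block sizes: e.g. for 4KB granules,
 * 2MB (level 2) and 1GB (level 1) blocks are always valid and 512GB
 * (level 0) blocks require TCR.DS; for 64KB granules, 512MB (level 2)
 * blocks are always valid and 4TB (level 1) blocks need a 52-bit PA
 * (FEAT_LPA).
 */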
static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
{
    uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
    return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
}
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint64_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    uint64_t descriptor, new_descriptor;
    ARMSecuritySpace out_space;

    /* TODO: This code does not support shareability levels. */
1632 param
= aa64_va_parameters(env
, address
, mmu_idx
,
1633 access_type
!= MMU_INST_FETCH
,
1634 !arm_el_is_aa64(env
, 1));
1638 * If TxSZ is programmed to a value larger than the maximum,
1639 * or smaller than the effective minimum, it is IMPLEMENTATION
1640 * DEFINED whether we behave as if the field were programmed
1641 * within bounds, or if a level 0 Translation fault is generated.
1643 * With FEAT_LVA, fault on less than minimum becomes required,
1644 * so our choice is to always raise the fault.
1646 if (param
.tsz_oob
) {
1647 goto do_translation_fault
;
1650 addrsize
= 64 - 8 * param
.tbi
;
1651 inputsize
= 64 - param
.tsz
;
1654 * Bound PS by PARANGE to find the effective output address size.
1655 * ID_AA64MMFR0 is a read-only register so values outside of the
1656 * supported mappings can be considered an implementation error.
1658 ps
= FIELD_EX64(cpu
->isar
.id_aa64mmfr0
, ID_AA64MMFR0
, PARANGE
);
1659 ps
= MIN(ps
, param
.ps
);
1660 assert(ps
< ARRAY_SIZE(pamax_map
));
1661 outputsize
= pamax_map
[ps
];
1664 * With LPA2, the effective output address (OA) size is at most 48 bits
1665 * unless TCR.DS == 1
1667 if (!param
.ds
&& param
.gran
!= Gran64K
) {
1668 outputsize
= MIN(outputsize
, 48);
1671 param
= aa32_va_parameters(env
, address
, mmu_idx
);
1673 addrsize
= (mmu_idx
== ARMMMUIdx_Stage2
? 40 : 32);
1674 inputsize
= addrsize
- param
.tsz
;
1679 * We determined the region when collecting the parameters, but we
1680 * have not yet validated that the address is valid for the region.
1681 * Extract the top bits and verify that they all match select.
1683 * For aa32, if inputsize == addrsize, then we have selected the
1684 * region by exclusion in aa32_va_parameters and there is no more
1685 * validation to do here.
1687 if (inputsize
< addrsize
) {
1688 target_ulong top_bits
= sextract64(address
, inputsize
,
1689 addrsize
- inputsize
);
1690 if (-top_bits
!= param
.select
) {
1691 /* The gap between the two regions is a Translation fault */
1692 goto do_translation_fault
;
1696 stride
= arm_granule_bits(param
.gran
) - 3;
1699 * Note that QEMU ignores shareability and cacheability attributes,
1700 * so we don't need to do anything with the SH, ORGN, IRGN fields
1701 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
1702 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1703 * implement any ASID-like capability so we can ignore it (instead
1704 * we will always flush the TLB any time the ASID is changed).
1706 ttbr
= regime_ttbr(env
, mmu_idx
, param
.select
);
1709 * Here we should have set up all the parameters for the translation:
1710 * inputsize, ttbr, epd, stride, tbi
1715 * Translation table walk disabled => Translation fault on TLB miss
1716 * Note: This is always 0 on 64-bit EL2 and EL3.
1718 goto do_translation_fault
;
1721 if (!regime_is_stage2(mmu_idx
)) {
1723 * The starting level depends on the virtual address size (which can
1724 * be up to 48 bits) and the translation granule size. It indicates
1725 * the number of strides (stride bits at a time) needed to
1726 * consume the bits of the input address. In the pseudocode this is:
1727 * level = 4 - RoundUp((inputsize - grainsize) / stride)
1728 * where their 'inputsize' is our 'inputsize', 'grainsize' is
1729 * our 'stride + 3' and 'stride' is our 'stride'.
1730 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1731 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1732 * = 4 - (inputsize - 4) / stride;
1734 level
= 4 - (inputsize
- 4) / stride
;
1736 int startlevel
= check_s2_mmu_setup(cpu
, aarch64
, tcr
, param
.ds
,
1738 if (startlevel
== INT_MIN
) {
1740 goto do_translation_fault
;
1745 indexmask_grainsize
= MAKE_64BIT_MASK(0, stride
+ 3);
1746 indexmask
= MAKE_64BIT_MASK(0, inputsize
- (stride
* (4 - level
)));
1748 /* Now we can extract the actual base address from the TTBR */
1749 descaddr
= extract64(ttbr
, 0, 48);
1752 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1754 * Otherwise, if the base address is out of range, raise AddressSizeFault.
1755 * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1756 * but we've just cleared the bits above 47, so simplify the test.
1758 if (outputsize
> 48) {
1759 descaddr
|= extract64(ttbr
, 2, 4) << 48;
1760 } else if (descaddr
>> outputsize
) {
1762 fi
->type
= ARMFault_AddressSize
;
1767 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1768 * and also to mask out CnP (bit 0) which could validly be non-zero.
1770 descaddr
&= ~indexmask
;
1773 * For AArch32, the address field in the descriptor goes up to bit 39
1774 * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
1775 * or an AddressSize fault is raised. So for v8 we extract those SBZ
1776 * bits as part of the address, which will be checked via outputsize.
1777 * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1778 * the highest bits of a 52-bit output are placed elsewhere.
1781 descaddrmask
= MAKE_64BIT_MASK(0, 50);
1782 } else if (arm_feature(env
, ARM_FEATURE_V8
)) {
1783 descaddrmask
= MAKE_64BIT_MASK(0, 48);
1785 descaddrmask
= MAKE_64BIT_MASK(0, 40);
1787 descaddrmask
&= ~indexmask_grainsize
;
1791 descaddr
|= (address
>> (stride
* (4 - level
))) & indexmask
;
1795 * Process the NSTable bit from the previous level. This changes
1796 * the table address space and the output space from Secure to
1797 * NonSecure. With RME, the EL3 translation regime does not change
1798 * from Root to NonSecure.
1800 if (ptw
->in_space
== ARMSS_Secure
1801 && !regime_is_stage2(mmu_idx
)
1802 && extract32(tableattrs
, 4, 1)) {
1804 * Stage2_S -> Stage2 or Phys_S -> Phys_NS
1805 * Assert the relative order of the secure/non-secure indexes.
1807 QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S
+ 1 != ARMMMUIdx_Phys_NS
);
1808 QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S
+ 1 != ARMMMUIdx_Stage2
);
1809 ptw
->in_ptw_idx
+= 1;
1810 ptw
->in_space
= ARMSS_NonSecure
;
1813 if (!S1_ptw_translate(env
, ptw
, descaddr
, fi
)) {
1816 descriptor
= arm_ldq_ptw(env
, ptw
, fi
);
1817 if (fi
->type
!= ARMFault_None
) {
1820 new_descriptor
= descriptor
;
 restart_atomic_update:
    if (!(descriptor & 1) ||
        (!(descriptor & 2) &&
         !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
        /* Invalid, or a block descriptor at an invalid level */
        goto do_translation_fault;
    }

    descaddr = descriptor & descaddrmask;

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
     * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
     * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
     * raise AddressSizeFault.
     */
    if (outputsize > 48) {
        if (param.ds) {
            descaddr |= extract64(descriptor, 8, 2) << 50;
        } else {
            descaddr |= extract64(descriptor, 12, 4) << 48;
        }
    } else if (descaddr >> outputsize) {
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    if ((descriptor & 2) && (level < 3)) {
        /*
         * Table entry. The top five bits are attributes which may
         * propagate down through lower levels of the table (and
         * which are all arranged so that 0 means "no effect", so
         * we can gather them up by ORing in the bits at each level).
         */
        tableattrs |= extract64(descriptor, 59, 5);
        level++;
        indexmask = indexmask_grainsize;
        goto next_level;
    }

    /*
     * Block entry at level 1 or 2, or page entry at level 3.
     * These are basically the same thing, although the number
     * of bits we pull in from the vaddr varies. Note that although
     * descaddrmask masks enough of the low bits of the descriptor
     * to give a correct page or table address, the address field
     * in a block descriptor is smaller; so we need to explicitly
     * clear the lower bits here before ORing in the low vaddr bits.
     *
     * Afterward, descaddr is the final physical address.
     */
    page_size = (1ULL << ((stride * (4 - level)) + 3));
    descaddr &= ~(hwaddr)(page_size - 1);
    descaddr |= (address & (page_size - 1));

    if (likely(!ptw->in_debug)) {
        /*
         * Access flag.
         * If HA is enabled, prepare to update the descriptor below.
         * Otherwise, pass the access fault on to software.
         */
        if (!(descriptor & (1 << 10))) {
            if (param.ha) {
                new_descriptor |= 1 << 10; /* AF */
            } else {
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
        }

        /*
         * Dirty Bit.
         * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
         * bit for writeback. The actual write protection test may still be
         * overridden by tableattrs, to be merged below.
         */
        if (param.hd
            && extract64(descriptor, 51, 1)  /* DBM */
            && access_type == MMU_DATA_STORE) {
            if (regime_is_stage2(mmu_idx)) {
                new_descriptor |= 1ull << 7;    /* set S2AP[1] */
            } else {
                new_descriptor &= ~(1ull << 7); /* clear AP[2] */
            }
        }
    }

    /*
     * Extract attributes from the (modified) descriptor, and apply
     * table descriptors. Stage 2 table descriptors do not include
     * any attribute fields. HPD disables all the table attributes
     * except NSTable (which we have already handled).
     */
    attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
    if (!regime_is_stage2(mmu_idx)) {
        if (!param.hpd) {
            attrs |= extract64(tableattrs, 0, 2) << 53;   /* XN, PXN */
            /*
             * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
             * means "force PL1 access only", which means forcing AP[1] to 0.
             */
            attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
            attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
        }
    }

    ap = extract32(attrs, 6, 2);
    out_space = ptw->in_space;
    if (regime_is_stage2(mmu_idx)) {
        /*
         * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
         * The bit remains ignored for other security states.
         * R_YMCSL: Executing an insn fetched from non-Realm causes
         * a stage2 permission fault.
         */
        if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
            out_space = ARMSS_NonSecure;
            result->f.prot = get_S2prot_noexecute(ap);
        } else {
            xn = extract64(attrs, 53, 2);
            result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
        }
    } else {
        int nse, ns = extract32(attrs, 5, 1);
        switch (out_space) {
        case ARMSS_Root:
            /*
             * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
             * R_XTYPW: NSE and NS together select the output pa space.
             */
            nse = extract32(attrs, 11, 1);
            out_space = (nse << 1) | ns;
            if (out_space == ARMSS_Secure &&
                !cpu_isar_feature(aa64_sel2, cpu)) {
                out_space = ARMSS_NonSecure;
            }
            break;
        case ARMSS_Secure:
            if (ns) {
                out_space = ARMSS_NonSecure;
            }
            break;
        case ARMSS_Realm:
            switch (mmu_idx) {
            case ARMMMUIdx_Stage1_E0:
            case ARMMMUIdx_Stage1_E1:
            case ARMMMUIdx_Stage1_E1_PAN:
                /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
                break;
            case ARMMMUIdx_E2:
            case ARMMMUIdx_E20_0:
            case ARMMMUIdx_E20_2:
            case ARMMMUIdx_E20_2_PAN:
                /*
                 * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
                 * NS changes the output to non-secure space.
                 */
                if (ns) {
                    out_space = ARMSS_NonSecure;
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case ARMSS_NonSecure:
            /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
            break;
        default:
            g_assert_not_reached();
        }
        xn = extract64(attrs, 54, 1);
        pxn = extract64(attrs, 53, 1);

        if (el == 1 && nv_nv1_enabled(env, ptw)) {
            /*
             * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
             * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
             * of UXN is 0.  Similarly for bits 59 and 60 in table descriptors
             * (which we have already folded into bits 53 and 54 of attrs).
             * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
             * Similarly, APTable[0] from the table descriptor is treated as 0;
             * we already folded this into AP[1] and squashing that to 0 does
             * the right thing.
             */
            pxn = xn;
            xn = 0;
            ap &= ~1;
        }

        /*
         * Note that we modified ptw->in_space earlier for NSTable, but
         * result->f.attrs retains a copy of the original security space.
         */
        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
                                    result->f.attrs.space, out_space);
    }

    if (!(result->f.prot & (1 << access_type))) {
        fi->type = ARMFault_Permission;
        goto do_fault;
    }

    /* If FEAT_HAFDBS has made changes, update the PTE. */
    if (new_descriptor != descriptor) {
        new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        /*
         * I_YZSVV says that if the in-memory descriptor has changed,
         * then we must use the information in that new value
         * (which might include a different output address, different
         * attributes, or generate a fault).
         * Restart the handling of the descriptor value from scratch.
         */
        if (new_descriptor != descriptor) {
            descriptor = new_descriptor;
            goto restart_atomic_update;
        }
    }

    result->f.attrs.space = out_space;
    result->f.attrs.secure = arm_space_is_secure(out_space);

    if (regime_is_stage2(mmu_idx)) {
        result->cacheattrs.is_s2_format = true;
        result->cacheattrs.attrs = extract32(attrs, 2, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 2, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        result->cacheattrs.is_s2_format = false;
        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);

        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
        }
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        result->cacheattrs.shareability = param.sh;
    } else {
        result->cacheattrs.shareability = extract32(attrs, 8, 2);
    }

    result->f.phys_addr = descaddr;
    result->f.lg_page_size = ctz64(page_size);
    return false;

 do_translation_fault:
    fi->type = ARMFault_Translation;
 do_fault:
    if (fi->s1ptw) {
        /* Retain the existing stage 2 fi->level */
        assert(fi->stage2);
    } else {
        fi->level = level;
        fi->stage2 = regime_is_stage2(mmu_idx);
    }
    fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
    return true;
}
static bool get_phys_addr_pmsav5(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
        /* MPU disabled. */
        result->f.phys_addr = address;
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    result->f.phys_addr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        result->f.prot = PAGE_READ;
        if (!is_user) {
            result->f.prot |= PAGE_WRITE;
        }
        break;
    case 3:
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        result->f.prot = PAGE_READ;
        break;
    case 6:
        result->f.prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    result->f.prot |= PAGE_EXEC;
    return false;
}
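/*
 * Each PMSAv5 region takes a 4-bit AP field from the packed
 * pmsav5_data_ap / pmsav5_insn_ap registers; e.g. region 3's permissions
 * sit in bits [15:12], which is what the (mask >> (n * 4)) & 0xf above
 * extracts.
 */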
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, uint8_t *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_secure, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        return false;
    }

    return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_user = regime_is_user(env, mmu_idx);
    bool secure = arm_space_is_secure(ptw->in_space);

    result->f.phys_addr = address;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.prot = 0;

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                result->f.lg_page_size = rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address,
                                         &result->f.prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                result->f.prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(result->f.prot & (1 << access_type));
}

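/*
 * Worked example for the PMSAv7 region decode above (illustrative
 * register values, not taken from any real board): a DRSR.Rsize field
 * of 9 becomes rsize = 10 after the increment, i.e. a 2^10 = 1KB
 * region with rmask = 0x3ff. With DRBAR = 0x20000000 the region spans
 * 0x20000000..0x200003ff; since rsize >= 8 it has eight 128-byte
 * subregions (rsize - 3 = 7), so address 0x20000150 falls in
 * subregion ((0x150 >> 7) & 7) = 2, and DRSR bit (snd + 8) = 10
 * decides whether that subregion is disabled.
 */
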
static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    if (regime_el(env, mmu_idx) == 2) {
        return env->pmsav8.hprbar;
    } else {
        return env->pmsav8.rbar[secure];
    }
}

static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    if (regime_el(env, mmu_idx) == 2) {
        return env->pmsav8.hprlar;
    } else {
        return env->pmsav8.rlar[secure];
    }
}

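/*
 * Illustrative decode of an RBAR/RLAR pair as performed by
 * pmsav8_mpu_lookup() below, assuming an M-profile core (x = 5, so
 * bitmask = 0x1f): RBAR = 0x20001000 gives base = 0x20001000, and
 * RLAR = 0x20001fe1 gives limit = 0x20001fe1 | 0x1f = 0x20001fff
 * with the enable bit (RLAR[0]) set, i.e. a 4KB region covering
 * 0x20001000..0x20001fff.
 */
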
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * If the region hit doesn't cover the entire TARGET_PAGE the address
     * is within, then we set the result page_size to 1 to force the
     * memory system to use a subpage.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
    int region_counter;

    if (regime_el(env, mmu_idx) == 2) {
        region_counter = cpu->pmsav8r_hdregion;
    } else {
        region_counter = cpu->pmsav7_dregion;
    }

    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.phys_addr = address;
    result->f.prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        fi->stage2 = true;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx,
                                    arm_secure_to_space(secure))) {
        /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
            hit = true;
        }

        uint32_t bitmask;
        if (arm_feature(env, ARM_FEATURE_M)) {
            bitmask = 0x1f;
        } else {
            bitmask = 0x3f;
            fi->level = 0;
        }

        for (n = region_counter - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:x] from the register
             * with bits [x-1:0] all zeroes, but the limit address is bits
             * [31:x] from the register with bits [x:0] all ones. Where x is
             * 5 for Cortex-M and 6 for Cortex-R
             */
            uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
            uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;

            if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                result->f.lg_page_size = 0;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                if (arm_feature(env, ARM_FEATURE_M)) {
                    fi->level = 1;
                }
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            fi->type = ARMFault_Background;
        } else {
            fi->type = ARMFault_Permission;
        }
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else {
        uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
        uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
        uint32_t ap = extract32(matched_rbar, 1, 2);
        uint32_t xn = extract32(matched_rbar, 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(matched_rlar, 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        if (regime_el(env, mmu_idx) == 2) {
            result->f.prot = simple_ap_to_rw_prot_is_user(ap,
                                            mmu_idx != ARMMMUIdx_E2);
        } else {
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        }

        if (!arm_feature(env, ARM_FEATURE_M)) {
            uint8_t attrindx = extract32(matched_rlar, 1, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            uint8_t sh = extract32(matched_rlar, 3, 2);

            if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
                result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
                xn = 0x1;
            }

            if ((regime_el(env, mmu_idx) == 1) &&
                regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
                pxn = 0x1;
            }

            result->cacheattrs.is_s2_format = false;
            result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
            result->cacheattrs.shareability = sh;
        }

        if (result->f.prot && !xn && !(pxn && !is_user)) {
            result->f.prot |= PAGE_EXEC;
        }

        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    if (arm_feature(env, ARM_FEATURE_M)) {
        fi->level = 1;
    }
    return !(result->f.prot & (1 << access_type));
}

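/*
 * Example of the lg_page_size = 0 forcing above, assuming 4KB
 * TARGET_PAGE_SIZE: a region spanning only 0x20001040..0x200010bf
 * neither covers the whole page 0x20001000..0x20001fff when it hits,
 * nor, when it misses, may a hit in some other region be cached for
 * the full page; in both cases lg_page_size is set to 0 so the core
 * TLB treats the page as a subpage and re-walks every access to it.
 */
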
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /*
     * The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}

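/*
 * For reference, these exempt windows appear to correspond to the
 * v8-M PPB debug and system components (ITM/DWT/FPB at 0xe0000000,
 * the SCS at 0xe000e000 and its Non-secure alias at 0xe002e000,
 * TPIU/ETM at 0xe0040000, and the ROM table at 0xe00ff000); compare
 * the SecurityCheck() pseudocode in the v8-M Architecture Reference
 * Manual.
 */
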
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool is_secure, V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !is_secure;
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}

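/*
 * Illustrative SAU region decode for the loop above (made-up register
 * values): with SAU_RBAR = 0x00200000 and SAU_RLAR = 0x003fffe3, the
 * 32-byte granularity gives base = 0x00200000 and
 * limit = 0x003fffe3 | 0x1f = 0x003fffff; the enable bit (RLAR[0]) is
 * set and the NSC bit (RLAR[1]) marks 0x00200000..0x003fffff as
 * Secure, Non-secure callable.
 */
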
static bool get_phys_addr_pmsav8(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    V8M_SAttributes sattrs = {};
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool secure = arm_space_is_secure(ptw->in_space);
    bool ret;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx,
                            secure, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                result->f.attrs.secure = false;
                result->f.attrs.space = ARMSS_NonSecure;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
                            result, fi, NULL);
    if (sattrs.subpage) {
        result->f.lg_page_size = 0;
    }
    return ret;
}

/*
 * Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (hcr & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

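/*
 * Worked example: S2 attrs 0xf (Outer and Inner Write-Back) with
 * HCR_EL2.CD clear converts to MAIR byte 0xff (Write-Back,
 * RW-allocate in both halves); with HCR_EL2.CD set the same input
 * collapses to 0x44 (Normal Non-cacheable).
 */
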
/*
 * Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /*
         * stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

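/*
 * Worked example: s1 = 0xf (Write-Back, RW-allocate) combined with
 * s2 = 0xa (Write-Through) yields (2 << 2) | 0x3 = 0xb, i.e. the
 * stage 2 Write-Through type with stage 1's allocation hints, while
 * s1 = 0x4 or s2 = 0x4 (Non-cacheable) always wins outright.
 */
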
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_nofwb(uint64_t hcr,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    if (s2.is_s2_format) {
        s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
    } else {
        s2_mair_attrs = s2.attrs;
    }

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4; /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8; /* nGRE */
        } else {
            ret_attrs = 0xc; /* GRE */
        }
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}

static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
    /*
     * Given the 4 bits specifying the outer or inner cacheability
     * in MAIR format, return a value specifying Normal Write-Back,
     * with the allocation and transient hints taken from the input
     * if the input specified some kind of cacheable attribute.
     */
    if (attr == 0 || attr == 4) {
        /*
         * 0 == an UNPREDICTABLE encoding
         * 4 == Non-cacheable
         * Either way, force Write-Back RW allocate non-transient
         */
        return 0xf;
    }
    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
    return attr | 4;
}

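/*
 * Worked example: attr = 0x4 (Non-cacheable) is forced to 0xf
 * (Write-Back, RW-allocate, non-transient), while attr = 0xa
 * (Write-Through, read-allocate) becomes 0xa | 4 = 0xe, keeping the
 * read-allocate hint but switching the type to Write-Back.
 */
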
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    assert(s2.is_s2_format && !s1.is_s2_format);

    switch (s2.attrs) {
    case 7:
        /* Use stage 1 attributes */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. Note that if S1 is Normal cacheable
         * then we take the allocation hints from it; otherwise it is
         * RW allocate, non-transient.
         */
        if ((s1.attrs & 0xf0) == 0) {
            /* S1 is Device */
            return 0xff;
        }
        /* Need to check the Inner and Outer nibbles separately */
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
               force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3: /* Device */
        /* Force Device, of subtype specified by S2 */
        return s2.attrs << 2;
    default:
        /*
         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
         * arbitrarily force Device.
         */
        return 0;
    }
}

/*
 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @hcr: HCR_EL2 effective value
 * @s1:  Attributes from stage 1 walk
 * @s2:  Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(!s1.is_s2_format);
    ret.is_s2_format = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (hcr & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
    }

    /*
     * Any location for which the resultant memory type is any
     * type of Device memory is always treated as Outer Shareable.
     * Any location for which the resultant memory type is Normal
     * Inner Non-cacheable, Outer Non-cacheable is always treated
     * as Outer Shareable.
     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}

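/*
 * Worked example of the Tagged handling above: a stage 1 attribute of
 * 0xf0 (Tagged Normal WB RWA) is combined as if it were 0xff; if the
 * merged result is still 0xff the Tagged encoding is restored, but
 * any downgrade by stage 2 (e.g. to Non-cacheable 0x44, which is also
 * forced Outer Shareable) loses the Tagged property.
 */
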
/*
 * MMU disabled. S1 addresses within aa64 translation regimes are
 * still checked for bounds -- see AArch64.S1DisabledOutput().
 */
static bool get_phys_addr_disabled(CPUARMState *env,
                                   S1Translate *ptw,
                                   target_ulong address,
                                   MMUAccessType access_type,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    uint8_t memattr = 0x00;    /* Device nGnRnE */
    uint8_t shareability = 0;  /* non-shareable */
    int r_el;

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        break;

    default:
        r_el = regime_el(env, mmu_idx);
        if (arm_el_is_aa64(env, r_el)) {
            int pamax = arm_pamax(env_archcpu(env));
            uint64_t tcr = env->cp15.tcr_el[r_el];
            int addrtop, tbi;

            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
            if (access_type == MMU_INST_FETCH) {
                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
            }
            tbi = (tbi >> extract64(address, 55, 1)) & 1;
            addrtop = (tbi ? 55 : 63);

            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                fi->type = ARMFault_AddressSize;
                fi->level = 0;
                fi->stage2 = false;
                return 1;
            }

            /*
             * When TBI is disabled, we've just validated that all of the
             * bits above PAMax are zero, so logically we only need to
             * clear the top byte for TBI. But it's clearer to follow
             * the pseudocode set of addrdesc.paddress.
             */
            address = extract64(address, 0, 52);
        }

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        if (r_el == 1) {
            uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
            if (hcr & HCR_DC) {
                if (hcr & HCR_DCT) {
                    memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
                } else {
                    memattr = 0xff;  /* Normal, WB, RWA */
                }
            }
        }
        if (memattr == 0) {
            if (access_type == MMU_INST_FETCH) {
                if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                    memattr = 0xee;  /* Normal, WT, RA, NT */
                } else {
                    memattr = 0x44;  /* Normal, NC, No */
                }
            }
            shareability = 2; /* outer shareable */
        }
        result->cacheattrs.is_s2_format = false;
        break;
    }

    result->f.phys_addr = address;
    result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->cacheattrs.shareability = shareability;
    result->cacheattrs.attrs = memattr;
    return false;
}

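/*
 * Worked example of the bounds check above: with a 48-bit PAMax and
 * TBI enabled for the address's half of the VA space, addrtop is 55,
 * so extract64(address, 48, 8) must be zero -- i.e. bits [55:48]
 * must be clear -- while with TBI disabled all of bits [63:48] are
 * checked. Out-of-range bits fault with ARMFault_AddressSize even
 * though translation itself is off.
 */
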
static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                   target_ulong address,
                                   MMUAccessType access_type,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    hwaddr ipa;
    int s1_prot, s1_lgpgsz;
    ARMSecuritySpace in_space = ptw->in_space;
    bool ret, ipa_secure, s1_guarded;
    ARMCacheAttrs cacheattrs1;
    ARMSecuritySpace ipa_space;
    uint64_t hcr;

    ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);

    /* If S1 fails, return early. */
    if (ret) {
        return ret;
    }

    ipa = result->f.phys_addr;
    ipa_secure = result->f.attrs.secure;
    ipa_space = result->f.attrs.space;

    ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
    ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
    ptw->in_space = ipa_space;
    ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);

    /*
     * S1 is done, now do S2 translation.
     * Save the stage1 results so that we may merge prot and cacheattrs later.
     */
    s1_prot = result->f.prot;
    s1_lgpgsz = result->f.lg_page_size;
    s1_guarded = result->f.extra.arm.guarded;
    cacheattrs1 = result->cacheattrs;
    memset(result, 0, sizeof(*result));

    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
    fi->s2addr = ipa;

    /* Combine the S1 and S2 perms. */
    result->f.prot &= s1_prot;

    /* If S2 fails, return early. */
    if (ret) {
        return ret;
    }

    /*
     * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
     * this means "don't put this in the TLB"; in this case, return a
     * result with lg_page_size == 0 to achieve that. Otherwise,
     * use the maximum of the S1 & S2 page size, so that invalidation
     * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
     * we know the combined result permissions etc only cover the minimum
     * of the S1 and S2 page size, because we know that the common TLB code
     * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
     * and passing a larger page size value only affects invalidations.)
     */
    if (result->f.lg_page_size < TARGET_PAGE_BITS ||
        s1_lgpgsz < TARGET_PAGE_BITS) {
        result->f.lg_page_size = 0;
    } else if (result->f.lg_page_size < s1_lgpgsz) {
        result->f.lg_page_size = s1_lgpgsz;
    }

    /* Combine the S1 and S2 cache attributes. */
    hcr = arm_hcr_el2_eff_secstate(env, in_space);
    if (hcr & HCR_DC) {
        /*
         * HCR.DC forces the first stage attributes to
         *  Normal Non-Shareable,
         *  Inner Write-Back Read-Allocate Write-Allocate,
         *  Outer Write-Back Read-Allocate Write-Allocate.
         * Do not overwrite Tagged within attrs.
         */
        if (cacheattrs1.attrs != 0xf0) {
            cacheattrs1.attrs = 0xff;
        }
        cacheattrs1.shareability = 0;
    }
    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
                                            result->cacheattrs);

    /* No BTI GP information in stage 2, we just use the S1 value */
    result->f.extra.arm.guarded = s1_guarded;

    /*
     * Check if IPA translates to secure or non-secure PA space.
     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
     */
    if (in_space == ARMSS_Secure) {
        result->f.attrs.secure =
            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
            && (ipa_secure
                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
    }

    return false;
}

static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                target_ulong address,
                                MMUAccessType access_type,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s1_mmu_idx;

    /*
     * The page table entries may downgrade Secure to NonSecure, but
     * cannot upgrade a NonSecure translation regime's attributes
     * to Secure or Realm.
     */
    result->f.attrs.space = ptw->in_space;
    result->f.attrs.secure = arm_space_is_secure(ptw->in_space);

    switch (mmu_idx) {
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* Checking Phys early avoids special casing later vs regime_el. */
        return get_phys_addr_disabled(env, ptw, address, access_type,
                                      result, fi);

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /*
         * First stage lookup uses second stage for ptw; only
         * Secure has both S and NS IPA and starts with Stage2_S.
         */
        ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
            ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
        break;

    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /*
         * Second stage lookup uses physical for ptw; whether this is S or
         * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
         * the Secure EL2&0 regime.
         */
        ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
        break;

    case ARMMMUIdx_E10_0:
        s1_mmu_idx = ARMMMUIdx_Stage1_E0;
        goto do_twostage;
    case ARMMMUIdx_E10_1:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1;
        goto do_twostage;
    case ARMMMUIdx_E10_1_PAN:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
    do_twostage:
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime, and EL2 present.
         * Otherwise, a stage1+stage2 translation is just stage 1.
         */
        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            !regime_translation_disabled(env, ARMMMUIdx_Stage2,
                                         ptw->in_space)) {
            return get_phys_addr_twostage(env, ptw, address, access_type,
                                          result, fi);
        }
        /* fall through */

    default:
        /* Single stage uses physical for ptw. */
        ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
        break;
    }

    result->f.attrs.user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        result->f.lg_page_size = TARGET_PAGE_BITS;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
                                       result, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
                                       result, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
                                       result, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      result->f.prot & PAGE_READ ? 'r' : '-',
                      result->f.prot & PAGE_WRITE ? 'w' : '-',
                      result->f.prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
        return get_phys_addr_disabled(env, ptw, address, access_type,
                                      result, fi);
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
    } else if (arm_feature(env, ARM_FEATURE_V7) ||
               regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
    } else {
        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
    }
}

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              target_ulong address,
                              MMUAccessType access_type,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi)
{
    if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
        return true;
    }
    if (!granule_protection_check(env, result->f.phys_addr,
                                  result->f.attrs.space, fi)) {
        fi->type = ARMFault_GPCFOnOutput;
        return true;
    }
    return false;
}

bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
        .in_space = space,
    };
    return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
}

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
    };
    ARMSecuritySpace ss;

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
        ss = arm_security_space_below_el3(env);
        break;
    case ARMMMUIdx_Stage2:
        /*
         * For Secure EL2, we need this index to be NonSecure;
         * otherwise this will already be NonSecure or Realm.
         */
        ss = arm_security_space_below_el3(env);
        if (ss == ARMSS_Secure) {
            ss = ARMSS_NonSecure;
        }
        break;
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        ss = ARMSS_NonSecure;
        break;
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        ss = ARMSS_Secure;
        break;
    case ARMMMUIdx_E3:
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            cpu_isar_feature(aa64_rme, env_archcpu(env))) {
            ss = ARMSS_Root;
        } else {
            ss = ARMSS_Secure;
        }
        break;
    case ARMMMUIdx_Phys_Root:
        ss = ARMSS_Root;
        break;
    case ARMMMUIdx_Phys_Realm:
        ss = ARMSS_Realm;
        break;
    default:
        g_assert_not_reached();
    }

    ptw.in_space = ss;
    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMSecuritySpace ss = arm_security_space(env);
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
        .in_space = ss,
        .in_debug = true,
    };
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool ret;

    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
    *attrs = res.f.attrs;

    if (ret) {
        return -1;
    }
    return res.f.phys_addr;
}