git.proxmox.com mirror_qemu.git: blame of target/arm/ptw.c
at commit "target/arm: Adjust the order of Phys and Stage2 ARMMMUIdx"
8ae08860
RH
1/*
2 * ARM page table walking.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9#include "qemu/osdep.h"
10#include "qemu/log.h"
1f2e87e5 11#include "qemu/range.h"
71943a1e 12#include "qemu/main-loop.h"
f3639a64 13#include "exec/exec-all.h"
8ae08860
RH
14#include "cpu.h"
15#include "internals.h"
2c1f429d 16#include "idau.h"
007cd176
RH
17#ifdef CONFIG_TCG
18# include "tcg/oversized-guest.h"
19#endif
8ae08860 20
6d2654ff
RH
21typedef struct S1Translate {
22 ARMMMUIdx in_mmu_idx;
48da29e4 23 ARMMMUIdx in_ptw_idx;
6d2654ff 24 bool in_secure;
4a358556 25 bool in_debug;
6d2654ff 26 bool out_secure;
71943a1e 27 bool out_rw;
4e7a2c98 28 bool out_be;
71943a1e 29 hwaddr out_virt;
6d2654ff 30 hwaddr out_phys;
f3639a64 31 void *out_host;
6d2654ff
RH
32} S1Translate;
33
34static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
35 uint64_t address,
36 MMUAccessType access_type, bool s1_is_el0,
c23f08a5 37 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
11552bb0
RH
38 __attribute__((nonnull));
39
3f5a74c5
RH
40static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
41 target_ulong address,
42 MMUAccessType access_type,
43 GetPhysAddrResult *result,
44 ARMMMUFaultInfo *fi)
45 __attribute__((nonnull));
46
1c73d848
RH
47/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
48static const uint8_t pamax_map[] = {
49 [0] = 32,
50 [1] = 36,
51 [2] = 40,
52 [3] = 42,
53 [4] = 44,
54 [5] = 48,
55 [6] = 52,
56};
57
58/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
59unsigned int arm_pamax(ARMCPU *cpu)
60{
22536b13
RH
61 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
62 unsigned int parange =
63 FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1c73d848 64
22536b13
RH
65 /*
66 * id_aa64mmfr0 is a read-only register so values outside of the
67 * supported mappings can be considered an implementation error.
68 */
69 assert(parange < ARRAY_SIZE(pamax_map));
70 return pamax_map[parange];
71 }
59e1b8a2
RH
72
73 /*
74 * In machvirt_init, we call arm_pamax on a cpu that is not fully
75 * initialized, so we can't rely on the propagation done in realize.
76 */
77 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
78 arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
22536b13
RH
79 /* v7 with LPAE */
80 return 40;
81 }
82 /* Anything else */
83 return 32;
1c73d848
RH
84}
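/*
 * Worked example (added for illustration): a CPU whose
 * ID_AA64MMFR0.PARANGE field reads as 5 maps to pamax_map[5] == 48,
 * so arm_pamax() reports a 48-bit physical address space.  A v7 CPU
 * with LPAE but without AArch64 falls into the "return 40" case.
 */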
85
1d261255
RH
86/*
87 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
88 */
89ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
90{
91 switch (mmu_idx) {
1d261255
RH
92 case ARMMMUIdx_E10_0:
93 return ARMMMUIdx_Stage1_E0;
94 case ARMMMUIdx_E10_1:
95 return ARMMMUIdx_Stage1_E1;
96 case ARMMMUIdx_E10_1_PAN:
97 return ARMMMUIdx_Stage1_E1_PAN;
98 default:
99 return mmu_idx;
100 }
101}
102
103ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
104{
105 return stage_1_mmu_idx(arm_mmu_idx(env));
106}
107
fcc0b041
PM
108/*
109 * Return where we should do ptw loads from for a stage 2 walk.
110 * This depends on whether the address we are looking up is a
111 * Secure IPA or a NonSecure IPA, which we know from whether this is
112 * Stage2 or Stage2_S.
113 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
114 */
115static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
116{
117 bool s2walk_secure;
118
119 /*
120 * We're OK to check the current state of the CPU here because
121 * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
122 * (2) there's no way to do a lookup that cares about Stage 2 for a
123 * different security state to the current one for AArch64, and AArch32
124 * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
125 * an NS stage 1+2 lookup while the NS bit is 0.)
126 */
127 if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
128 return ARMMMUIdx_Phys_NS;
129 }
130 if (stage2idx == ARMMMUIdx_Stage2_S) {
131 s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
132 } else {
133 s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
134 }
135 return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
136
137}
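/*
 * Illustration of the NSW/SW handling above: in the Secure EL1&0
 * regime with VTCR_EL2.NSW clear, a walk for a NonSecure IPA
 * (ARMMMUIdx_Stage2) loads its descriptors from the Secure PA space
 * (ARMMMUIdx_Phys_S); setting NSW sends those loads to the NonSecure
 * PA space instead.  VSTCR_EL2.SW plays the same role for Secure IPAs
 * (ARMMMUIdx_Stage2_S).
 */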
138
11552bb0
RH
139static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
140{
141 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
142}
143
3b318aae
RH
144/* Return the TTBR associated with this translation regime */
145static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
146{
147 if (mmu_idx == ARMMMUIdx_Stage2) {
148 return env->cp15.vttbr_el2;
149 }
150 if (mmu_idx == ARMMMUIdx_Stage2_S) {
151 return env->cp15.vsttbr_el2;
152 }
153 if (ttbrn == 0) {
154 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
155 } else {
156 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
157 }
158}
159
8db1a3a0 160/* Return true if the specified stage of address translation is disabled */
7e80c0a4
RH
161static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
162 bool is_secure)
8db1a3a0
RH
163{
164 uint64_t hcr_el2;
165
166 if (arm_feature(env, ARM_FEATURE_M)) {
7e80c0a4 167 switch (env->v7m.mpu_ctrl[is_secure] &
8db1a3a0
RH
168 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
169 case R_V7M_MPU_CTRL_ENABLE_MASK:
170 /* Enabled, but not for HardFault and NMI */
171 return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
172 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
173 /* Enabled for all cases */
174 return false;
175 case 0:
176 default:
177 /*
178 * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
179 * we warned about that in armv7m_nvic.c when the guest set it.
180 */
181 return true;
182 }
183 }
184
2189c798 185 hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
8db1a3a0 186
3b2af993
RH
187 switch (mmu_idx) {
188 case ARMMMUIdx_Stage2:
189 case ARMMMUIdx_Stage2_S:
8db1a3a0
RH
190 /* HCR.DC means HCR.VM behaves as 1 */
191 return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
8db1a3a0 192
3b2af993
RH
193 case ARMMMUIdx_E10_0:
194 case ARMMMUIdx_E10_1:
195 case ARMMMUIdx_E10_1_PAN:
fdf12933
RH
196 /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
197 if (hcr_el2 & HCR_TGE) {
8db1a3a0
RH
198 return true;
199 }
3b2af993 200 break;
8db1a3a0 201
3b2af993
RH
202 case ARMMMUIdx_Stage1_E0:
203 case ARMMMUIdx_Stage1_E1:
204 case ARMMMUIdx_Stage1_E1_PAN:
8db1a3a0 205 /* HCR.DC means SCTLR_EL1.M behaves as 0 */
3b2af993
RH
206 if (hcr_el2 & HCR_DC) {
207 return true;
208 }
209 break;
210
211 case ARMMMUIdx_E20_0:
212 case ARMMMUIdx_E20_2:
213 case ARMMMUIdx_E20_2_PAN:
214 case ARMMMUIdx_E2:
215 case ARMMMUIdx_E3:
216 break;
217
a1ce3084
RH
218 case ARMMMUIdx_Phys_NS:
219 case ARMMMUIdx_Phys_S:
220 /* No translation for physical address spaces. */
221 return true;
222
3b2af993
RH
223 default:
224 g_assert_not_reached();
8db1a3a0
RH
225 }
226
227 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
228}
229
f3639a64 230static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
11552bb0
RH
231{
232 /*
233 * For an S1 page table walk, the stage 1 attributes are always
234 * some form of "this is Normal memory". The combined S1+S2
235 * attributes are therefore only Device if stage 2 specifies Device.
236 * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
237 * ie when cacheattrs.attrs bits [3:2] are 0b00.
238 * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
239 * when cacheattrs.attrs bit [2] is 0.
240 */
ac76c2e5 241 if (hcr & HCR_FWB) {
f3639a64 242 return (attrs & 0x4) == 0;
11552bb0 243 } else {
f3639a64 244 return (attrs & 0xc) == 0;
11552bb0
RH
245 }
246}
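/*
 * Example: with FWB == 0 a stage 2 attribute of 0b0000 (Device-nGnRnE)
 * has bits [3:2] == 0b00 and is treated as Device, while 0b1111
 * (Normal, Write-Back) is not.  With FWB == 1 only bit [2] of the
 * attribute is checked.
 */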
247
248/* Translate a S1 pagetable walk through S2 if needed. */
6d2654ff
RH
249static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
250 hwaddr addr, ARMMMUFaultInfo *fi)
11552bb0 251{
6d2654ff 252 bool is_secure = ptw->in_secure;
f3639a64 253 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
48da29e4 254 ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
f3639a64 255 uint8_t pte_attrs;
f3639a64 256
71943a1e
RH
257 ptw->out_virt = addr;
258
f3639a64
RH
259 if (unlikely(ptw->in_debug)) {
260 /*
261 * From gdbstub, do not use softmmu so that we don't modify the
262 * state of the cpu at all, including softmmu tlb contents.
263 */
48da29e4 264 if (regime_is_stage2(s2_mmu_idx)) {
f3639a64
RH
265 S1Translate s2ptw = {
266 .in_mmu_idx = s2_mmu_idx,
fcc0b041
PM
267 .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
268 .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
f3639a64
RH
269 .in_debug = true,
270 };
271 GetPhysAddrResult s2 = { };
48da29e4 272
4a1103af
RH
273 if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
274 false, &s2, fi)) {
f3639a64
RH
275 goto fail;
276 }
277 ptw->out_phys = s2.f.phys_addr;
278 pte_attrs = s2.cacheattrs.attrs;
fcc0b041 279 ptw->out_secure = s2.f.attrs.secure;
48da29e4
RH
280 } else {
281 /* Regime is physical. */
282 ptw->out_phys = addr;
283 pte_attrs = 0;
fcc0b041 284 ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S;
11552bb0 285 }
f3639a64 286 ptw->out_host = NULL;
71943a1e 287 ptw->out_rw = false;
f3639a64 288 } else {
0d3de77a 289#ifdef CONFIG_TCG
f3639a64
RH
290 CPUTLBEntryFull *full;
291 int flags;
292
293 env->tlb_fi = fi;
d507e6c5 294 flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
f3639a64
RH
295 arm_to_core_mmu_idx(s2_mmu_idx),
296 true, &ptw->out_host, &full, 0);
297 env->tlb_fi = NULL;
298
299 if (unlikely(flags & TLB_INVALID_MASK)) {
300 goto fail;
301 }
9d2617ac 302 ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
71943a1e 303 ptw->out_rw = full->prot & PAGE_WRITE;
f3639a64 304 pte_attrs = full->pte_attrs;
fcc0b041 305 ptw->out_secure = full->attrs.secure;
0d3de77a
FR
306#else
307 g_assert_not_reached();
308#endif
f3639a64 309 }
ac76c2e5 310
48da29e4 311 if (regime_is_stage2(s2_mmu_idx)) {
f3639a64
RH
312 uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
313
314 if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
11552bb0
RH
315 /*
316 * PTW set and S1 walk touched S2 Device memory:
317 * generate Permission fault.
318 */
319 fi->type = ARMFault_Permission;
320 fi->s2addr = addr;
321 fi->stage2 = true;
322 fi->s1ptw = true;
ab1f7885 323 fi->s1ns = !is_secure;
6d2654ff 324 return false;
11552bb0 325 }
11552bb0 326 }
6d2654ff 327
f3639a64 328 ptw->out_be = regime_translation_big_endian(env, mmu_idx);
6d2654ff 329 return true;
f3639a64
RH
330
331 fail:
332 assert(fi->type != ARMFault_None);
333 fi->s2addr = addr;
334 fi->stage2 = true;
335 fi->s1ptw = true;
336 fi->s1ns = !is_secure;
337 return false;
11552bb0
RH
338}
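/*
 * Typical usage by the walkers below (a sketch of the existing
 * pattern, not new code): translate the descriptor address first,
 * then load through the out_* fields filled in above, e.g.
 *
 *     if (!S1_ptw_translate(env, ptw, table, fi)) {
 *         goto do_fault;
 *     }
 *     desc = arm_ldl_ptw(env, ptw, fi);
 *     if (fi->type != ARMFault_None) {
 *         goto do_fault;
 *     }
 */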
339
340/* All loads done in the course of a page table walk go through here. */
93e5b3a6 341static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
6d2654ff 342 ARMMMUFaultInfo *fi)
11552bb0 343{
5e79887b 344 CPUState *cs = env_cpu(env);
71943a1e 345 void *host = ptw->out_host;
11552bb0
RH
346 uint32_t data;
347
71943a1e 348 if (likely(host)) {
f3639a64 349 /* Page tables are in RAM, and we have the host address. */
71943a1e 350 data = qatomic_read((uint32_t *)host);
f3639a64 351 if (ptw->out_be) {
71943a1e 352 data = be32_to_cpu(data);
f3639a64 353 } else {
71943a1e 354 data = le32_to_cpu(data);
f3639a64 355 }
11552bb0 356 } else {
f3639a64
RH
357 /* Page tables are in MMIO. */
358 MemTxAttrs attrs = { .secure = ptw->out_secure };
359 AddressSpace *as = arm_addressspace(cs, attrs);
360 MemTxResult result = MEMTX_OK;
361
362 if (ptw->out_be) {
363 data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
364 } else {
365 data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
366 }
367 if (unlikely(result != MEMTX_OK)) {
368 fi->type = ARMFault_SyncExternalOnWalk;
369 fi->ea = arm_extabort_type(result);
370 return 0;
371 }
11552bb0 372 }
f3639a64 373 return data;
11552bb0
RH
374}
375
93e5b3a6 376static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
6d2654ff 377 ARMMMUFaultInfo *fi)
11552bb0 378{
5e79887b 379 CPUState *cs = env_cpu(env);
71943a1e 380 void *host = ptw->out_host;
11552bb0
RH
381 uint64_t data;
382
71943a1e 383 if (likely(host)) {
f3639a64 384 /* Page tables are in RAM, and we have the host address. */
71943a1e
RH
385#ifdef CONFIG_ATOMIC64
386 data = qatomic_read__nocheck((uint64_t *)host);
387 if (ptw->out_be) {
388 data = be64_to_cpu(data);
389 } else {
390 data = le64_to_cpu(data);
391 }
392#else
f3639a64 393 if (ptw->out_be) {
71943a1e 394 data = ldq_be_p(host);
f3639a64 395 } else {
71943a1e 396 data = ldq_le_p(host);
f3639a64 397 }
71943a1e 398#endif
11552bb0 399 } else {
f3639a64
RH
400 /* Page tables are in MMIO. */
401 MemTxAttrs attrs = { .secure = ptw->out_secure };
402 AddressSpace *as = arm_addressspace(cs, attrs);
403 MemTxResult result = MEMTX_OK;
404
405 if (ptw->out_be) {
406 data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
407 } else {
408 data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
409 }
410 if (unlikely(result != MEMTX_OK)) {
411 fi->type = ARMFault_SyncExternalOnWalk;
412 fi->ea = arm_extabort_type(result);
413 return 0;
414 }
11552bb0 415 }
f3639a64 416 return data;
11552bb0
RH
417}
418
71943a1e
RH
419static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
420 uint64_t new_val, S1Translate *ptw,
421 ARMMMUFaultInfo *fi)
422{
d3ae5f5d 423#ifdef TARGET_AARCH64
71943a1e
RH
424 uint64_t cur_val;
425 void *host = ptw->out_host;
426
427 if (unlikely(!host)) {
428 fi->type = ARMFault_UnsuppAtomicUpdate;
429 fi->s1ptw = true;
430 return 0;
431 }
432
433 /*
434 * Raising a stage2 Protection fault for an atomic update to a read-only
435 * page is delayed until it is certain that there is a change to make.
436 */
437 if (unlikely(!ptw->out_rw)) {
438 int flags;
439 void *discard;
440
441 env->tlb_fi = fi;
1770b2f2 442 flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
71943a1e
RH
443 arm_to_core_mmu_idx(ptw->in_ptw_idx),
444 true, &discard, 0);
445 env->tlb_fi = NULL;
446
447 if (unlikely(flags & TLB_INVALID_MASK)) {
448 assert(fi->type != ARMFault_None);
449 fi->s2addr = ptw->out_virt;
450 fi->stage2 = true;
451 fi->s1ptw = true;
452 fi->s1ns = !ptw->in_secure;
453 return 0;
454 }
455
456 /* In case CAS mismatches and we loop, remember writability. */
457 ptw->out_rw = true;
458 }
459
460#ifdef CONFIG_ATOMIC64
461 if (ptw->out_be) {
462 old_val = cpu_to_be64(old_val);
463 new_val = cpu_to_be64(new_val);
464 cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
465 cur_val = be64_to_cpu(cur_val);
466 } else {
467 old_val = cpu_to_le64(old_val);
468 new_val = cpu_to_le64(new_val);
469 cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
470 cur_val = le64_to_cpu(cur_val);
471 }
472#else
473 /*
474 * We can't support the full 64-bit atomic cmpxchg on the host.
475 * Because this is only used for FEAT_HAFDBS, which is only for AA64,
476 * we know that TCG_OVERSIZED_GUEST is set, which means that we are
477 * running in round-robin mode and could only race with dma i/o.
478 */
d3ae5f5d 479#if !TCG_OVERSIZED_GUEST
71943a1e
RH
480# error "Unexpected configuration"
481#endif
482 bool locked = qemu_mutex_iothread_locked();
483 if (!locked) {
484 qemu_mutex_lock_iothread();
485 }
486 if (ptw->out_be) {
487 cur_val = ldq_be_p(host);
488 if (cur_val == old_val) {
489 stq_be_p(host, new_val);
490 }
491 } else {
492 cur_val = ldq_le_p(host);
493 if (cur_val == old_val) {
494 stq_le_p(host, new_val);
495 }
496 }
497 if (!locked) {
498 qemu_mutex_unlock_iothread();
499 }
500#endif
501
502 return cur_val;
d3ae5f5d
RH
503#else
504 /* AArch32 does not have FEAT_HAFDBS. */
505 g_assert_not_reached();
506#endif
71943a1e
RH
507}
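/*
 * Note (illustration): arm_casq_ptw() is used by the LPAE walker when
 * FEAT_HAFDBS wants to update the Access flag or dirty state in a
 * descriptor.  If the compare-and-swap returns a value different from
 * the descriptor originally read, the walker restarts from that new
 * value rather than trusting the stale one (see restart_atomic_update
 * in get_phys_addr_lpae below).
 */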
508
4c74ab15
RH
509static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
510 uint32_t *table, uint32_t address)
511{
512 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
c1547bba 513 uint64_t tcr = regime_tcr(env, mmu_idx);
9e70e26c
PM
514 int maskshift = extract32(tcr, 0, 3);
515 uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
516 uint32_t base_mask;
4c74ab15 517
9e70e26c
PM
518 if (address & mask) {
519 if (tcr & TTBCR_PD1) {
4c74ab15
RH
520 /* Translation table walk disabled for TTBR1 */
521 return false;
522 }
523 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
524 } else {
9e70e26c 525 if (tcr & TTBCR_PD0) {
4c74ab15
RH
526 /* Translation table walk disabled for TTBR0 */
527 return false;
528 }
9e70e26c
PM
529 base_mask = ~((uint32_t)0x3fffu >> maskshift);
530 *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
4c74ab15
RH
531 }
532 *table |= (address >> 18) & 0x3ffc;
533 return true;
534}
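/*
 * Worked example: with TTBCR.N (the low three bits of the TCR) equal
 * to 0, maskshift is 0 and mask is 0, so every address is looked up
 * via TTBR0 and the level 1 table is the full 16KB (base_mask keeps
 * TTBR0 bits [31:14]).  With N == 2, any address with either of the
 * top two bits set falls to TTBR1 instead.
 */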
535
4845d3be
RH
536/*
537 * Translate section/page access permissions to page R/W protection flags
538 * @env: CPUARMState
539 * @mmu_idx: MMU index indicating required translation regime
540 * @ap: The 3-bit access permissions (AP[2:0])
541 * @domain_prot: The 2-bit domain access permissions
6f2d9d74 542 * @is_user: TRUE if accessing from PL0
4845d3be 543 */
6f2d9d74
TK
544static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
545 int ap, int domain_prot, bool is_user)
4845d3be 546{
4845d3be
RH
547 if (domain_prot == 3) {
548 return PAGE_READ | PAGE_WRITE;
549 }
550
551 switch (ap) {
552 case 0:
553 if (arm_feature(env, ARM_FEATURE_V7)) {
554 return 0;
555 }
556 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
557 case SCTLR_S:
558 return is_user ? 0 : PAGE_READ;
559 case SCTLR_R:
560 return PAGE_READ;
561 default:
562 return 0;
563 }
564 case 1:
565 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
566 case 2:
567 if (is_user) {
568 return PAGE_READ;
569 } else {
570 return PAGE_READ | PAGE_WRITE;
571 }
572 case 3:
573 return PAGE_READ | PAGE_WRITE;
574 case 4: /* Reserved. */
575 return 0;
576 case 5:
577 return is_user ? 0 : PAGE_READ;
578 case 6:
579 return PAGE_READ;
580 case 7:
581 if (!arm_feature(env, ARM_FEATURE_V6K)) {
582 return 0;
583 }
584 return PAGE_READ;
585 default:
586 g_assert_not_reached();
587 }
588}
589
6f2d9d74
TK
590/*
591 * Translate section/page access permissions to page R/W protection flags
592 * @env: CPUARMState
593 * @mmu_idx: MMU index indicating required translation regime
594 * @ap: The 3-bit access permissions (AP[2:0])
595 * @domain_prot: The 2-bit domain access permissions
596 */
597static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
598 int ap, int domain_prot)
599{
600 return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
601 regime_is_user(env, mmu_idx));
602}
603
4845d3be
RH
604/*
605 * Translate section/page access permissions to page R/W protection flags.
606 * @ap: The 2-bit simple AP (AP[2:1])
607 * @is_user: TRUE if accessing from PL0
608 */
609static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
610{
611 switch (ap) {
612 case 0:
613 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
614 case 1:
615 return PAGE_READ | PAGE_WRITE;
616 case 2:
617 return is_user ? 0 : PAGE_READ;
618 case 3:
619 return PAGE_READ;
620 default:
621 g_assert_not_reached();
622 }
623}
624
625static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
626{
627 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
628}
629
6d2654ff
RH
630static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
631 uint32_t address, MMUAccessType access_type,
632 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
f2d2f5ce 633{
f2d2f5ce
RH
634 int level = 1;
635 uint32_t table;
636 uint32_t desc;
637 int type;
638 int ap;
639 int domain = 0;
640 int domain_prot;
641 hwaddr phys_addr;
642 uint32_t dacr;
643
644 /* Pagetable walk. */
645 /* Lookup l1 descriptor. */
6d2654ff 646 if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
f2d2f5ce
RH
647 /* Section translation fault if page walk is disabled by PD0 or PD1 */
648 fi->type = ARMFault_Translation;
649 goto do_fault;
650 }
93e5b3a6
RH
651 if (!S1_ptw_translate(env, ptw, table, fi)) {
652 goto do_fault;
653 }
654 desc = arm_ldl_ptw(env, ptw, fi);
f2d2f5ce
RH
655 if (fi->type != ARMFault_None) {
656 goto do_fault;
657 }
658 type = (desc & 3);
659 domain = (desc >> 5) & 0x0f;
6d2654ff 660 if (regime_el(env, ptw->in_mmu_idx) == 1) {
f2d2f5ce
RH
661 dacr = env->cp15.dacr_ns;
662 } else {
663 dacr = env->cp15.dacr_s;
664 }
665 domain_prot = (dacr >> (domain * 2)) & 3;
666 if (type == 0) {
667 /* Section translation fault. */
668 fi->type = ARMFault_Translation;
669 goto do_fault;
670 }
671 if (type != 2) {
672 level = 2;
673 }
674 if (domain_prot == 0 || domain_prot == 2) {
675 fi->type = ARMFault_Domain;
676 goto do_fault;
677 }
678 if (type == 2) {
679 /* 1Mb section. */
680 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
681 ap = (desc >> 10) & 3;
7fa7ea8f 682 result->f.lg_page_size = 20; /* 1MB */
f2d2f5ce
RH
683 } else {
684 /* Lookup l2 entry. */
685 if (type == 1) {
686 /* Coarse pagetable. */
687 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
688 } else {
689 /* Fine pagetable. */
690 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
691 }
93e5b3a6
RH
692 if (!S1_ptw_translate(env, ptw, table, fi)) {
693 goto do_fault;
694 }
695 desc = arm_ldl_ptw(env, ptw, fi);
f2d2f5ce
RH
696 if (fi->type != ARMFault_None) {
697 goto do_fault;
698 }
699 switch (desc & 3) {
700 case 0: /* Page translation fault. */
701 fi->type = ARMFault_Translation;
702 goto do_fault;
703 case 1: /* 64k page. */
704 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
705 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
7fa7ea8f 706 result->f.lg_page_size = 16;
f2d2f5ce
RH
707 break;
708 case 2: /* 4k page. */
709 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
710 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
7fa7ea8f 711 result->f.lg_page_size = 12;
f2d2f5ce
RH
712 break;
713 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
714 if (type == 1) {
715 /* ARMv6/XScale extended small page format */
716 if (arm_feature(env, ARM_FEATURE_XSCALE)
717 || arm_feature(env, ARM_FEATURE_V6)) {
718 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7fa7ea8f 719 result->f.lg_page_size = 12;
f2d2f5ce
RH
720 } else {
721 /*
722 * UNPREDICTABLE in ARMv5; we choose to take a
723 * page translation fault.
724 */
725 fi->type = ARMFault_Translation;
726 goto do_fault;
727 }
728 } else {
729 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
7fa7ea8f 730 result->f.lg_page_size = 10;
f2d2f5ce
RH
731 }
732 ap = (desc >> 4) & 3;
733 break;
734 default:
735 /* Never happens, but compiler isn't smart enough to tell. */
736 g_assert_not_reached();
737 }
738 }
6d2654ff 739 result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
7fa7ea8f
RH
740 result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
741 if (!(result->f.prot & (1 << access_type))) {
f2d2f5ce
RH
742 /* Access permission fault. */
743 fi->type = ARMFault_Permission;
744 goto do_fault;
745 }
7fa7ea8f 746 result->f.phys_addr = phys_addr;
f2d2f5ce 747 return false;
53c038ef
RH
748do_fault:
749 fi->domain = domain;
750 fi->level = level;
751 return true;
752}
753
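/*
 * Worked example for the short-descriptor walk above: for a 1MB
 * section hit, the level 1 index is VA[31:20] (the "address >> 18"
 * above already includes the scaling by 4), and the output address is
 * the section base from the descriptor plus VA[19:0], i.e.
 * phys_addr = (desc & 0xfff00000) | (address & 0x000fffff).
 */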
6d2654ff
RH
754static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
755 uint32_t address, MMUAccessType access_type,
756 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
53c038ef 757{
53c038ef 758 ARMCPU *cpu = env_archcpu(env);
6d2654ff 759 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
53c038ef
RH
760 int level = 1;
761 uint32_t table;
762 uint32_t desc;
763 uint32_t xn;
764 uint32_t pxn = 0;
765 int type;
766 int ap;
767 int domain = 0;
768 int domain_prot;
769 hwaddr phys_addr;
770 uint32_t dacr;
771 bool ns;
6f2d9d74 772 int user_prot;
53c038ef
RH
773
774 /* Pagetable walk. */
775 /* Lookup l1 descriptor. */
776 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
777 /* Section translation fault if page walk is disabled by PD0 or PD1 */
778 fi->type = ARMFault_Translation;
779 goto do_fault;
780 }
93e5b3a6
RH
781 if (!S1_ptw_translate(env, ptw, table, fi)) {
782 goto do_fault;
783 }
784 desc = arm_ldl_ptw(env, ptw, fi);
53c038ef
RH
785 if (fi->type != ARMFault_None) {
786 goto do_fault;
787 }
788 type = (desc & 3);
789 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
790 /* Section translation fault, or attempt to use the encoding
791 * which is Reserved on implementations without PXN.
792 */
793 fi->type = ARMFault_Translation;
794 goto do_fault;
795 }
796 if ((type == 1) || !(desc & (1 << 18))) {
797 /* Page or Section. */
798 domain = (desc >> 5) & 0x0f;
799 }
800 if (regime_el(env, mmu_idx) == 1) {
801 dacr = env->cp15.dacr_ns;
802 } else {
803 dacr = env->cp15.dacr_s;
804 }
805 if (type == 1) {
806 level = 2;
807 }
808 domain_prot = (dacr >> (domain * 2)) & 3;
809 if (domain_prot == 0 || domain_prot == 2) {
810 /* Section or Page domain fault */
811 fi->type = ARMFault_Domain;
812 goto do_fault;
813 }
814 if (type != 1) {
815 if (desc & (1 << 18)) {
816 /* Supersection. */
817 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
818 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
819 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
7fa7ea8f 820 result->f.lg_page_size = 24; /* 16MB */
53c038ef
RH
821 } else {
822 /* Section. */
823 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7fa7ea8f 824 result->f.lg_page_size = 20; /* 1MB */
53c038ef
RH
825 }
826 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
827 xn = desc & (1 << 4);
828 pxn = desc & 1;
829 ns = extract32(desc, 19, 1);
830 } else {
831 if (cpu_isar_feature(aa32_pxn, cpu)) {
832 pxn = (desc >> 2) & 1;
833 }
834 ns = extract32(desc, 3, 1);
835 /* Lookup l2 entry. */
836 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
93e5b3a6
RH
837 if (!S1_ptw_translate(env, ptw, table, fi)) {
838 goto do_fault;
839 }
840 desc = arm_ldl_ptw(env, ptw, fi);
53c038ef
RH
841 if (fi->type != ARMFault_None) {
842 goto do_fault;
843 }
844 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
845 switch (desc & 3) {
846 case 0: /* Page translation fault. */
847 fi->type = ARMFault_Translation;
848 goto do_fault;
849 case 1: /* 64k page. */
850 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
851 xn = desc & (1 << 15);
7fa7ea8f 852 result->f.lg_page_size = 16;
53c038ef
RH
853 break;
854 case 2: case 3: /* 4k page. */
855 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
856 xn = desc & 1;
7fa7ea8f 857 result->f.lg_page_size = 12;
53c038ef
RH
858 break;
859 default:
860 /* Never happens, but compiler isn't smart enough to tell. */
861 g_assert_not_reached();
862 }
863 }
864 if (domain_prot == 3) {
7fa7ea8f 865 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
53c038ef
RH
866 } else {
867 if (pxn && !regime_is_user(env, mmu_idx)) {
868 xn = 1;
869 }
870 if (xn && access_type == MMU_INST_FETCH) {
871 fi->type = ARMFault_Permission;
872 goto do_fault;
873 }
874
875 if (arm_feature(env, ARM_FEATURE_V6K) &&
876 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
877 /* The simplified model uses AP[0] as an access control bit. */
878 if ((ap & 1) == 0) {
879 /* Access flag fault. */
880 fi->type = ARMFault_AccessFlag;
881 goto do_fault;
882 }
7fa7ea8f 883 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
6f2d9d74 884 user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
53c038ef 885 } else {
7fa7ea8f 886 result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
6f2d9d74 887 user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
53c038ef 888 }
7fa7ea8f
RH
889 if (result->f.prot && !xn) {
890 result->f.prot |= PAGE_EXEC;
53c038ef 891 }
7fa7ea8f 892 if (!(result->f.prot & (1 << access_type))) {
53c038ef
RH
893 /* Access permission fault. */
894 fi->type = ARMFault_Permission;
895 goto do_fault;
896 }
6f2d9d74
TK
897 if (regime_is_pan(env, mmu_idx) &&
898 !regime_is_user(env, mmu_idx) &&
899 user_prot &&
900 access_type != MMU_INST_FETCH) {
901 /* Privileged Access Never fault */
902 fi->type = ARMFault_Permission;
903 goto do_fault;
904 }
53c038ef
RH
905 }
906 if (ns) {
907 /* The NS bit will (as required by the architecture) have no effect if
908 * the CPU doesn't support TZ or this is a non-secure translation
909 * regime, because the attribute will already be non-secure.
910 */
7fa7ea8f 911 result->f.attrs.secure = false;
53c038ef 912 }
7fa7ea8f 913 result->f.phys_addr = phys_addr;
53c038ef 914 return false;
f2d2f5ce
RH
915do_fault:
916 fi->domain = domain;
917 fi->level = level;
918 return true;
919}
920
f8526edc
RH
921/*
922 * Translate S2 section/page access permissions to protection flags
923 * @env: CPUARMState
924 * @s2ap: The 2-bit stage2 access permissions (S2AP)
925 * @xn: XN (execute-never) bits
926 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
927 */
928static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
929{
930 int prot = 0;
931
932 if (s2ap & 1) {
933 prot |= PAGE_READ;
934 }
935 if (s2ap & 2) {
936 prot |= PAGE_WRITE;
937 }
938
939 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
940 switch (xn) {
941 case 0:
942 prot |= PAGE_EXEC;
943 break;
944 case 1:
945 if (s1_is_el0) {
946 prot |= PAGE_EXEC;
947 }
948 break;
949 case 2:
950 break;
951 case 3:
952 if (!s1_is_el0) {
953 prot |= PAGE_EXEC;
954 }
955 break;
956 default:
957 g_assert_not_reached();
958 }
959 } else {
960 if (!extract32(xn, 1, 1)) {
961 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
962 prot |= PAGE_EXEC;
963 }
964 }
965 }
966 return prot;
967}
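/*
 * Example: when the any_tts2uxn check above passes, S2AP == 0b11 with
 * XN == 0b01 yields read, write and execute permission for an EL0
 * access (s1_is_el0 == true) but only read and write for the
 * corresponding EL1 access.
 */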
968
969/*
970 * Translate section/page access permissions to protection flags
971 * @env: CPUARMState
972 * @mmu_idx: MMU index indicating required translation regime
973 * @is_aa64: TRUE if AArch64
974 * @ap: The 2-bit simple AP (AP[2:1])
975 * @ns: NS (non-secure) bit
976 * @xn: XN (execute-never) bit
977 * @pxn: PXN (privileged execute-never) bit
978 */
979static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
980 int ap, int ns, int xn, int pxn)
981{
dd17143f 982 ARMCPU *cpu = env_archcpu(env);
f8526edc
RH
983 bool is_user = regime_is_user(env, mmu_idx);
984 int prot_rw, user_rw;
985 bool have_wxn;
986 int wxn = 0;
987
edc05dd4 988 assert(!regime_is_stage2(mmu_idx));
f8526edc
RH
989
990 user_rw = simple_ap_to_rw_prot_is_user(ap, true);
991 if (is_user) {
992 prot_rw = user_rw;
993 } else {
dd17143f
PM
994 /*
995 * PAN controls can forbid data accesses but don't affect insn fetch.
996 * Plain PAN forbids data accesses if EL0 has data permissions;
997 * PAN3 forbids data accesses if EL0 has either data or exec perms.
998 * Note that for AArch64 the 'user can exec' case is exactly !xn.
999 * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
1000 * do not affect EPAN.
1001 */
f8526edc 1002 if (user_rw && regime_is_pan(env, mmu_idx)) {
dd17143f
PM
1003 prot_rw = 0;
1004 } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
1005 regime_is_pan(env, mmu_idx) &&
1006 (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
f8526edc
RH
1007 prot_rw = 0;
1008 } else {
1009 prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
1010 }
1011 }
1012
1013 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
1014 return prot_rw;
1015 }
1016
1017 /* TODO have_wxn should be replaced with
1018 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
1019 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
1020 * compatible processors have EL2, which is required for [U]WXN.
1021 */
1022 have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
1023
1024 if (have_wxn) {
1025 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
1026 }
1027
1028 if (is_aa64) {
1029 if (regime_has_2_ranges(mmu_idx) && !is_user) {
1030 xn = pxn || (user_rw & PAGE_WRITE);
1031 }
1032 } else if (arm_feature(env, ARM_FEATURE_V7)) {
1033 switch (regime_el(env, mmu_idx)) {
1034 case 1:
1035 case 3:
1036 if (is_user) {
1037 xn = xn || !(user_rw & PAGE_READ);
1038 } else {
1039 int uwxn = 0;
1040 if (have_wxn) {
1041 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
1042 }
1043 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
1044 (uwxn && (user_rw & PAGE_WRITE));
1045 }
1046 break;
1047 case 2:
1048 break;
1049 }
1050 } else {
1051 xn = wxn = 0;
1052 }
1053
1054 if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
1055 return prot_rw;
1056 }
1057 return prot_rw | PAGE_EXEC;
1058}
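/*
 * Example: for an AArch64 stage 1 walk at EL1 with PAN in effect, a
 * page whose AP[2:1] grants EL0 read/write (user_rw != 0) yields
 * prot_rw == 0 for the privileged access, so a data access takes a
 * Permission fault even though the page is accessible from EL0.
 * Similarly, with SCTLR.WXN set, any page writable at the current EL
 * loses PAGE_EXEC via the final wxn check above.
 */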
1059
2f0ec92e
RH
1060static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
1061 ARMMMUIdx mmu_idx)
1062{
c1547bba 1063 uint64_t tcr = regime_tcr(env, mmu_idx);
2f0ec92e
RH
1064 uint32_t el = regime_el(env, mmu_idx);
1065 int select, tsz;
1066 bool epd, hpd;
1067
1068 assert(mmu_idx != ARMMMUIdx_Stage2_S);
1069
1070 if (mmu_idx == ARMMMUIdx_Stage2) {
1071 /* VTCR */
1072 bool sext = extract32(tcr, 4, 1);
1073 bool sign = extract32(tcr, 3, 1);
1074
1075 /*
1076 * If the sign-extend bit is not the same as t0sz[3], the result
1077 * is unpredictable. Flag this as a guest error.
1078 */
1079 if (sign != sext) {
1080 qemu_log_mask(LOG_GUEST_ERROR,
1081 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
1082 }
1083 tsz = sextract32(tcr, 0, 4) + 8;
1084 select = 0;
1085 hpd = false;
1086 epd = false;
1087 } else if (el == 2) {
1088 /* HTCR */
1089 tsz = extract32(tcr, 0, 3);
1090 select = 0;
1091 hpd = extract64(tcr, 24, 1);
1092 epd = false;
1093 } else {
1094 int t0sz = extract32(tcr, 0, 3);
1095 int t1sz = extract32(tcr, 16, 3);
1096
1097 if (t1sz == 0) {
1098 select = va > (0xffffffffu >> t0sz);
1099 } else {
1100 /* Note that we will detect errors later. */
1101 select = va >= ~(0xffffffffu >> t1sz);
1102 }
1103 if (!select) {
1104 tsz = t0sz;
1105 epd = extract32(tcr, 7, 1);
1106 hpd = extract64(tcr, 41, 1);
1107 } else {
1108 tsz = t1sz;
1109 epd = extract32(tcr, 23, 1);
1110 hpd = extract64(tcr, 42, 1);
1111 }
1112 /* For aarch32, hpd0 is not enabled without t2e as well. */
1113 hpd &= extract32(tcr, 6, 1);
1114 }
1115
1116 return (ARMVAParameters) {
1117 .tsz = tsz,
1118 .select = select,
1119 .epd = epd,
1120 .hpd = hpd,
1121 };
1122}
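/*
 * Worked example for the EL1 case above: with T0SZ == 1 and T1SZ == 1,
 * VAs below 0x80000000 select TTBR0 (select == 0) and VAs from
 * 0x80000000 upward select TTBR1 (select == 1); with T0SZ == 0 and
 * T1SZ == 0, TTBR0 covers the whole 32-bit space and select stays 0.
 */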
1123
c5168785
RH
1124/*
1125 * check_s2_mmu_setup
1126 * @cpu: ARMCPU
1127 * @is_aa64: True if the translation regime is in AArch64 state
0ffe5b7b
RH
1128 * @tcr: VTCR_EL2 or VSTCR_EL2
1129 * @ds: Effective value of TCR.DS.
1130 * @iasize: Bitsize of IPAs
c5168785
RH
1131 * @stride: Page-table stride (See the ARM ARM)
1132 *
0ffe5b7b
RH
1133 * Decode the starting level of the S2 lookup, returning INT_MIN if
1134 * the configuration is invalid.
c5168785 1135 */
0ffe5b7b
RH
1136static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
1137 bool ds, int iasize, int stride)
c5168785 1138{
0ffe5b7b
RH
1139 int sl0, sl2, startlevel, granulebits, levels;
1140 int s1_min_iasize, s1_max_iasize;
c5168785 1141
0ffe5b7b 1142 sl0 = extract32(tcr, 6, 2);
c5168785 1143 if (is_aa64) {
0ffe5b7b
RH
1144 /*
1145 * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
1146 * so interleave AArch64.S2StartLevel.
1147 */
c5168785 1148 switch (stride) {
0ffe5b7b
RH
1149 case 9: /* 4KB */
1150 /* SL2 is RES0 unless DS=1 & 4KB granule. */
1151 sl2 = extract64(tcr, 33, 1);
1152 if (ds && sl2) {
1153 if (sl0 != 0) {
1154 goto fail;
1155 }
1156 startlevel = -1;
1157 } else {
1158 startlevel = 2 - sl0;
1159 switch (sl0) {
1160 case 2:
1161 if (arm_pamax(cpu) < 44) {
1162 goto fail;
1163 }
1164 break;
1165 case 3:
1166 if (!cpu_isar_feature(aa64_st, cpu)) {
1167 goto fail;
1168 }
1169 startlevel = 3;
1170 break;
1171 }
c5168785
RH
1172 }
1173 break;
0ffe5b7b
RH
1174 case 11: /* 16KB */
1175 switch (sl0) {
1176 case 2:
1177 if (arm_pamax(cpu) < 42) {
1178 goto fail;
1179 }
1180 break;
1181 case 3:
1182 if (!ds) {
1183 goto fail;
1184 }
1185 break;
c5168785 1186 }
0ffe5b7b 1187 startlevel = 3 - sl0;
c5168785 1188 break;
0ffe5b7b
RH
1189 case 13: /* 64KB */
1190 switch (sl0) {
1191 case 2:
1192 if (arm_pamax(cpu) < 44) {
1193 goto fail;
1194 }
1195 break;
1196 case 3:
1197 goto fail;
c5168785 1198 }
0ffe5b7b 1199 startlevel = 3 - sl0;
c5168785
RH
1200 break;
1201 default:
1202 g_assert_not_reached();
1203 }
c5168785 1204 } else {
0ffe5b7b
RH
1205 /*
1206 * Things are simpler for AArch32 EL2, with only 4k pages.
1207 * There is no separate S2InvalidSL function, but AArch32.S2Walk
1208 * begins with walkparms.sl0 in {'1x'}.
1209 */
c5168785 1210 assert(stride == 9);
0ffe5b7b
RH
1211 if (sl0 >= 2) {
1212 goto fail;
c5168785 1213 }
0ffe5b7b 1214 startlevel = 2 - sl0;
c5168785 1215 }
0ffe5b7b
RH
1216
1217 /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
1218 levels = 3 - startlevel;
1219 granulebits = stride + 3;
1220
1221 s1_min_iasize = levels * stride + granulebits + 1;
1222 s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
1223
1224 if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
1225 return startlevel;
1226 }
1227
1228 fail:
1229 return INT_MIN;
c5168785
RH
1230}
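/*
 * Worked example: an AArch64 stage 2 with a 4KB granule (stride 9,
 * granulebits 12), SL0 == 1 and DS == 0 starts at level 1, so
 * levels == 2, s1_min_iasize == 2 * 9 + 12 + 1 == 31 and
 * s1_max_iasize == 31 + 8 + 4 == 43; a 40-bit IPA size is therefore
 * accepted and the function returns 1.
 */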
1231
3283222a
RH
1232/**
1233 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
1234 *
1235 * Returns false if the translation was successful. Otherwise, the fields
1236 * of @result may not be filled in, and the populated @fi describes why
1237 * the translation aborted, in the format of a long-format DFSR/IFSR
1238 * fault register, with the following caveat: the WnR bit is never set
1239 * (the caller must do this).
1240 *
1241 * @env: CPUARMState
6d2654ff 1242 * @ptw: Current and next stage parameters for the walk.
3283222a
RH
1243 * @address: virtual address to get physical address for
1244 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
6d2654ff
RH
1245 * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
1246 * (so this is a stage 2 page table walk),
1247 * must be true if this is stage 2 of a stage 1+2
3283222a
RH
1248 * walk for an EL0 access. If @mmu_idx is anything else,
1249 * @s1_is_el0 is ignored.
03ee9bbe 1250 * @result: set on translation success,
3283222a 1251 * @fi: set to fault info if the translation fails
3283222a 1252 */
6d2654ff
RH
1253static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
1254 uint64_t address,
1255 MMUAccessType access_type, bool s1_is_el0,
c23f08a5 1256 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
3283222a
RH
1257{
1258 ARMCPU *cpu = env_archcpu(env);
6d2654ff
RH
1259 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
1260 bool is_secure = ptw->in_secure;
15f8f467 1261 int32_t level;
3283222a
RH
1262 ARMVAParameters param;
1263 uint64_t ttbr;
1264 hwaddr descaddr, indexmask, indexmask_grainsize;
1265 uint32_t tableattrs;
1266 target_ulong page_size;
45666091 1267 uint64_t attrs;
3283222a
RH
1268 int32_t stride;
1269 int addrsize, inputsize, outputsize;
c1547bba 1270 uint64_t tcr = regime_tcr(env, mmu_idx);
3283222a
RH
1271 int ap, ns, xn, pxn;
1272 uint32_t el = regime_el(env, mmu_idx);
1273 uint64_t descaddrmask;
1274 bool aarch64 = arm_el_is_aa64(env, el);
71943a1e 1275 uint64_t descriptor, new_descriptor;
fe4ddc15 1276 bool nstable;
3283222a
RH
1277
1278 /* TODO: This code does not support shareability levels. */
1279 if (aarch64) {
1280 int ps;
1281
1282 param = aa64_va_parameters(env, address, mmu_idx,
478dccbb
PM
1283 access_type != MMU_INST_FETCH,
1284 !arm_el_is_aa64(env, 1));
3283222a
RH
1285 level = 0;
1286
1287 /*
1288 * If TxSZ is programmed to a value larger than the maximum,
1289 * or smaller than the effective minimum, it is IMPLEMENTATION
1290 * DEFINED whether we behave as if the field were programmed
1291 * within bounds, or if a level 0 Translation fault is generated.
1292 *
1293 * With FEAT_LVA, fault on less than minimum becomes required,
1294 * so our choice is to always raise the fault.
1295 */
1296 if (param.tsz_oob) {
27c1b81d 1297 goto do_translation_fault;
3283222a
RH
1298 }
1299
1300 addrsize = 64 - 8 * param.tbi;
1301 inputsize = 64 - param.tsz;
1302
1303 /*
1304 * Bound PS by PARANGE to find the effective output address size.
1305 * ID_AA64MMFR0 is a read-only register so values outside of the
1306 * supported mappings can be considered an implementation error.
1307 */
1308 ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1309 ps = MIN(ps, param.ps);
1310 assert(ps < ARRAY_SIZE(pamax_map));
1311 outputsize = pamax_map[ps];
312b71ab
AB
1312
1313 /*
1314 * With LPA2, the effective output address (OA) size is at most 48 bits
1315 * unless TCR.DS == 1
1316 */
1317 if (!param.ds && param.gran != Gran64K) {
1318 outputsize = MIN(outputsize, 48);
1319 }
3283222a
RH
1320 } else {
1321 param = aa32_va_parameters(env, address, mmu_idx);
1322 level = 1;
1323 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1324 inputsize = addrsize - param.tsz;
1325 outputsize = 40;
1326 }
1327
1328 /*
1329 * We determined the region when collecting the parameters, but we
1330 * have not yet validated that the address is valid for the region.
1331 * Extract the top bits and verify that they all match select.
1332 *
1333 * For aa32, if inputsize == addrsize, then we have selected the
1334 * region by exclusion in aa32_va_parameters and there is no more
1335 * validation to do here.
1336 */
1337 if (inputsize < addrsize) {
1338 target_ulong top_bits = sextract64(address, inputsize,
1339 addrsize - inputsize);
1340 if (-top_bits != param.select) {
1341 /* The gap between the two regions is a Translation fault */
27c1b81d 1342 goto do_translation_fault;
3283222a
RH
1343 }
1344 }
1345
3c003f70 1346 stride = arm_granule_bits(param.gran) - 3;
3283222a
RH
1347
1348 /*
1349 * Note that QEMU ignores shareability and cacheability attributes,
1350 * so we don't need to do anything with the SH, ORGN, IRGN fields
1351 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
1352 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1353 * implement any ASID-like capability so we can ignore it (instead
1354 * we will always flush the TLB any time the ASID is changed).
1355 */
1356 ttbr = regime_ttbr(env, mmu_idx, param.select);
1357
1358 /*
1359 * Here we should have set up all the parameters for the translation:
1360 * inputsize, ttbr, epd, stride, tbi
1361 */
1362
1363 if (param.epd) {
1364 /*
1365 * Translation table walk disabled => Translation fault on TLB miss
1366 * Note: This is always 0 on 64-bit EL2 and EL3.
1367 */
27c1b81d 1368 goto do_translation_fault;
3283222a
RH
1369 }
1370
edc05dd4 1371 if (!regime_is_stage2(mmu_idx)) {
3283222a
RH
1372 /*
1373 * The starting level depends on the virtual address size (which can
1374 * be up to 48 bits) and the translation granule size. It indicates
1375 * the number of strides (stride bits at a time) needed to
1376 * consume the bits of the input address. In the pseudocode this is:
1377 * level = 4 - RoundUp((inputsize - grainsize) / stride)
1378 * where their 'inputsize' is our 'inputsize', 'grainsize' is
1379 * our 'stride + 3' and 'stride' is our 'stride'.
1380 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1381 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1382 * = 4 - (inputsize - 4) / stride;
1383 */
1384 level = 4 - (inputsize - 4) / stride;
1385 } else {
0ffe5b7b
RH
1386 int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
1387 inputsize, stride);
1388 if (startlevel == INT_MIN) {
1389 level = 0;
27c1b81d 1390 goto do_translation_fault;
3283222a
RH
1391 }
1392 level = startlevel;
1393 }
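    /*
     * Example of the stage 1 start-level formula above: a 48-bit
     * inputsize with a 4KB granule (stride 9) gives
     * level = 4 - (48 - 4) / 9 = 0, i.e. a four-level walk, while a
     * 39-bit inputsize gives level = 4 - (39 - 4) / 9 = 1, a
     * three-level walk.
     */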
1394
1395 indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1396 indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1397
1398 /* Now we can extract the actual base address from the TTBR */
1399 descaddr = extract64(ttbr, 0, 48);
1400
1401 /*
1402 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1403 *
1404 * Otherwise, if the base address is out of range, raise AddressSizeFault.
1405 * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1406 * but we've just cleared the bits above 47, so simplify the test.
1407 */
1408 if (outputsize > 48) {
1409 descaddr |= extract64(ttbr, 2, 4) << 48;
1410 } else if (descaddr >> outputsize) {
1411 level = 0;
27c1b81d 1412 fi->type = ARMFault_AddressSize;
3283222a
RH
1413 goto do_fault;
1414 }
1415
1416 /*
1417 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1418 * and also to mask out CnP (bit 0) which could validly be non-zero.
1419 */
1420 descaddr &= ~indexmask;
1421
1422 /*
1423 * For AArch32, the address field in the descriptor goes up to bit 39
1424 * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
1425 * or an AddressSize fault is raised. So for v8 we extract those SBZ
1426 * bits as part of the address, which will be checked via outputsize.
1427 * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1428 * the highest bits of a 52-bit output are placed elsewhere.
1429 */
1430 if (param.ds) {
1431 descaddrmask = MAKE_64BIT_MASK(0, 50);
1432 } else if (arm_feature(env, ARM_FEATURE_V8)) {
1433 descaddrmask = MAKE_64BIT_MASK(0, 48);
1434 } else {
1435 descaddrmask = MAKE_64BIT_MASK(0, 40);
1436 }
1437 descaddrmask &= ~indexmask_grainsize;
1438
1439 /*
21a4ab83 1440 * Secure stage 1 accesses start with the page table in secure memory and
3283222a
RH
1441 * can be downgraded to non-secure at any step. Non-secure accesses
1442 * remain non-secure. We implement this by just ORing in the NSTable/NS
1443 * bits at each step.
21a4ab83 1444 * Stage 2 never gets this kind of downgrade.
3283222a 1445 */
c23f08a5 1446 tableattrs = is_secure ? 0 : (1 << 4);
3283222a 1447
fe4ddc15
RH
1448 next_level:
1449 descaddr |= (address >> (stride * (4 - level))) & indexmask;
1450 descaddr &= ~7ULL;
21a4ab83 1451 nstable = !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1);
d38fa967 1452 if (nstable && ptw->in_secure) {
3283222a 1453 /*
fe4ddc15 1454 * Stage2_S -> Stage2 or Phys_S -> Phys_NS
d38fa967 1455 * Assert the relative order of the secure/non-secure indexes.
3283222a 1456 */
d38fa967
RH
1457 QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
1458 QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
1459 ptw->in_ptw_idx += 1;
fe4ddc15
RH
1460 ptw->in_secure = false;
1461 }
1462 if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
1463 goto do_fault;
1464 }
1465 descriptor = arm_ldq_ptw(env, ptw, fi);
1466 if (fi->type != ARMFault_None) {
1467 goto do_fault;
1468 }
71943a1e 1469 new_descriptor = descriptor;
3283222a 1470
71943a1e 1471 restart_atomic_update:
fe4ddc15
RH
1472 if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
1473 /* Invalid, or the Reserved level 3 encoding */
27c1b81d 1474 goto do_translation_fault;
fe4ddc15
RH
1475 }
1476
1477 descaddr = descriptor & descaddrmask;
1478
1479 /*
1480 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1481 * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of
1482 * descaddr are in [9:8]. Otherwise, if descaddr is out of range,
1483 * raise AddressSizeFault.
1484 */
1485 if (outputsize > 48) {
1486 if (param.ds) {
1487 descaddr |= extract64(descriptor, 8, 2) << 50;
1488 } else {
1489 descaddr |= extract64(descriptor, 12, 4) << 48;
3283222a 1490 }
fe4ddc15 1491 } else if (descaddr >> outputsize) {
27c1b81d 1492 fi->type = ARMFault_AddressSize;
fe4ddc15
RH
1493 goto do_fault;
1494 }
1495
1496 if ((descriptor & 2) && (level < 3)) {
3283222a 1497 /*
fe4ddc15
RH
1498 * Table entry. The top five bits are attributes which may
1499 * propagate down through lower levels of the table (and
1500 * which are all arranged so that 0 means "no effect", so
1501 * we can gather them up by ORing in the bits at each level).
3283222a 1502 */
fe4ddc15
RH
1503 tableattrs |= extract64(descriptor, 59, 5);
1504 level++;
1505 indexmask = indexmask_grainsize;
1506 goto next_level;
3283222a 1507 }
fe4ddc15
RH
1508
1509 /*
1510 * Block entry at level 1 or 2, or page entry at level 3.
1511 * These are basically the same thing, although the number
1512 * of bits we pull in from the vaddr varies. Note that although
1513 * descaddrmask masks enough of the low bits of the descriptor
1514 * to give a correct page or table address, the address field
1515 * in a block descriptor is smaller; so we need to explicitly
1516 * clear the lower bits here before ORing in the low vaddr bits.
71943a1e
RH
1517 *
1518 * Afterward, descaddr is the final physical address.
fe4ddc15
RH
1519 */
1520 page_size = (1ULL << ((stride * (4 - level)) + 3));
1521 descaddr &= ~(hwaddr)(page_size - 1);
1522 descaddr |= (address & (page_size - 1));
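    /*
     * Example: with a 4KB granule (stride 9) a level 3 page gives
     * page_size = 1 << 12 (4KB), a level 2 block 1 << 21 (2MB) and a
     * level 1 block 1 << 30 (1GB).
     */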
fe4ddc15 1523
71943a1e
RH
1524 if (likely(!ptw->in_debug)) {
1525 /*
1526 * Access flag.
1527 * If HA is enabled, prepare to update the descriptor below.
1528 * Otherwise, pass the access fault on to software.
1529 */
1530 if (!(descriptor & (1 << 10))) {
1531 if (param.ha) {
1532 new_descriptor |= 1 << 10; /* AF */
1533 } else {
1534 fi->type = ARMFault_AccessFlag;
1535 goto do_fault;
1536 }
1537 }
65c123fd
RH
1538
1539 /*
1540 * Dirty Bit.
1541 * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
1542 * bit for writeback. The actual write protection test may still be
1543 * overridden by tableattrs, to be merged below.
1544 */
1545 if (param.hd
1546 && extract64(descriptor, 51, 1) /* DBM */
1547 && access_type == MMU_DATA_STORE) {
1548 if (regime_is_stage2(mmu_idx)) {
1549 new_descriptor |= 1ull << 7; /* set S2AP[1] */
1550 } else {
1551 new_descriptor &= ~(1ull << 7); /* clear AP[2] */
1552 }
1553 }
71943a1e
RH
1554 }
1555
fe4ddc15 1556 /*
71943a1e
RH
1557 * Extract attributes from the (modified) descriptor, and apply
1558 * table descriptors. Stage 2 table descriptors do not include
1559 * any attribute fields. HPD disables all the table attributes
1560 * except NSTable.
fe4ddc15 1561 */
71943a1e 1562 attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
34a57fae
RH
1563 if (!regime_is_stage2(mmu_idx)) {
1564 attrs |= nstable << 5; /* NS */
1565 if (!param.hpd) {
1566 attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
1567 /*
1568 * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1569 * means "force PL1 access only", which means forcing AP[1] to 0.
1570 */
1571 attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
1572 attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
1573 }
1574 }
fe4ddc15 1575
45666091 1576 ap = extract32(attrs, 6, 2);
edc05dd4 1577 if (regime_is_stage2(mmu_idx)) {
3283222a 1578 ns = mmu_idx == ARMMMUIdx_Stage2;
45666091 1579 xn = extract64(attrs, 53, 2);
7fa7ea8f 1580 result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
3283222a 1581 } else {
45666091
RH
1582 ns = extract32(attrs, 5, 1);
1583 xn = extract64(attrs, 54, 1);
1584 pxn = extract64(attrs, 53, 1);
7fa7ea8f 1585 result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
3283222a
RH
1586 }
1587
7fa7ea8f 1588 if (!(result->f.prot & (1 << access_type))) {
27c1b81d 1589 fi->type = ARMFault_Permission;
3283222a
RH
1590 goto do_fault;
1591 }
1592
71943a1e
RH
1593 /* If FEAT_HAFDBS has made changes, update the PTE. */
1594 if (new_descriptor != descriptor) {
1595 new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
1596 if (fi->type != ARMFault_None) {
1597 goto do_fault;
1598 }
1599 /*
1600 * I_YZSVV says that if the in-memory descriptor has changed,
1601 * then we must use the information in that new value
1602 * (which might include a different output address, different
1603 * attributes, or generate a fault).
1604 * Restart the handling of the descriptor value from scratch.
1605 */
1606 if (new_descriptor != descriptor) {
1607 descriptor = new_descriptor;
1608 goto restart_atomic_update;
1609 }
1610 }
1611
3283222a
RH
1612 if (ns) {
1613 /*
1614 * The NS bit will (as required by the architecture) have no effect if
1615 * the CPU doesn't support TZ or this is a non-secure translation
1616 * regime, because the attribute will already be non-secure.
1617 */
7fa7ea8f 1618 result->f.attrs.secure = false;
3283222a 1619 }
937f2245 1620
edc05dd4 1621 if (regime_is_stage2(mmu_idx)) {
03ee9bbe 1622 result->cacheattrs.is_s2_format = true;
45666091 1623 result->cacheattrs.attrs = extract32(attrs, 2, 4);
3283222a
RH
1624 } else {
1625 /* Index into MAIR registers for cache attributes */
45666091 1626 uint8_t attrindx = extract32(attrs, 2, 3);
3283222a
RH
1627 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1628 assert(attrindx <= 7);
03ee9bbe
RH
1629 result->cacheattrs.is_s2_format = false;
1630 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
6a3b1e44
RH
1631
1632 /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
1633 if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
1634 result->f.guarded = extract64(attrs, 50, 1); /* GP */
1635 }
3283222a
RH
1636 }
1637
1638 /*
1639 * For FEAT_LPA2 and effective DS, the SH field in the attributes
1640 * was re-purposed for output address bits. The SH attribute in
1641 * that case comes from TCR_ELx, which we extracted earlier.
1642 */
1643 if (param.ds) {
03ee9bbe 1644 result->cacheattrs.shareability = param.sh;
3283222a 1645 } else {
45666091 1646 result->cacheattrs.shareability = extract32(attrs, 8, 2);
3283222a
RH
1647 }
1648
7fa7ea8f
RH
1649 result->f.phys_addr = descaddr;
1650 result->f.lg_page_size = ctz64(page_size);
3283222a
RH
1651 return false;
1652
27c1b81d
RH
1653 do_translation_fault:
1654 fi->type = ARMFault_Translation;
1655 do_fault:
3283222a
RH
1656 fi->level = level;
1657 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
edc05dd4 1658 fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
3283222a
RH
1659 fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1660 return true;
1661}
1662
9a12fb36
RH
1663static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1664 MMUAccessType access_type, ARMMMUIdx mmu_idx,
a5b5092f 1665 bool is_secure, GetPhysAddrResult *result,
9a12fb36
RH
1666 ARMMMUFaultInfo *fi)
1667{
1668 int n;
1669 uint32_t mask;
1670 uint32_t base;
1671 bool is_user = regime_is_user(env, mmu_idx);
1672
7e80c0a4 1673 if (regime_translation_disabled(env, mmu_idx, is_secure)) {
9a12fb36 1674 /* MPU disabled. */
7fa7ea8f
RH
1675 result->f.phys_addr = address;
1676 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9a12fb36
RH
1677 return false;
1678 }
1679
7fa7ea8f 1680 result->f.phys_addr = address;
9a12fb36
RH
1681 for (n = 7; n >= 0; n--) {
1682 base = env->cp15.c6_region[n];
1683 if ((base & 1) == 0) {
1684 continue;
1685 }
1686 mask = 1 << ((base >> 1) & 0x1f);
1687 /* Keep this shift separate from the above to avoid an
1688 (undefined) << 32. */
1689 mask = (mask << 1) - 1;
1690 if (((base ^ address) & ~mask) == 0) {
1691 break;
1692 }
1693 }
1694 if (n < 0) {
1695 fi->type = ARMFault_Background;
1696 return true;
1697 }
1698
1699 if (access_type == MMU_INST_FETCH) {
1700 mask = env->cp15.pmsav5_insn_ap;
1701 } else {
1702 mask = env->cp15.pmsav5_data_ap;
1703 }
1704 mask = (mask >> (n * 4)) & 0xf;
1705 switch (mask) {
1706 case 0:
1707 fi->type = ARMFault_Permission;
1708 fi->level = 1;
1709 return true;
1710 case 1:
1711 if (is_user) {
1712 fi->type = ARMFault_Permission;
1713 fi->level = 1;
1714 return true;
1715 }
7fa7ea8f 1716 result->f.prot = PAGE_READ | PAGE_WRITE;
9a12fb36
RH
1717 break;
1718 case 2:
7fa7ea8f 1719 result->f.prot = PAGE_READ;
9a12fb36 1720 if (!is_user) {
7fa7ea8f 1721 result->f.prot |= PAGE_WRITE;
9a12fb36
RH
1722 }
1723 break;
1724 case 3:
7fa7ea8f 1725 result->f.prot = PAGE_READ | PAGE_WRITE;
9a12fb36
RH
1726 break;
1727 case 5:
1728 if (is_user) {
1729 fi->type = ARMFault_Permission;
1730 fi->level = 1;
1731 return true;
1732 }
7fa7ea8f 1733 result->f.prot = PAGE_READ;
9a12fb36
RH
1734 break;
1735 case 6:
7fa7ea8f 1736 result->f.prot = PAGE_READ;
9a12fb36
RH
1737 break;
1738 default:
1739 /* Bad permission. */
1740 fi->type = ARMFault_Permission;
1741 fi->level = 1;
1742 return true;
1743 }
7fa7ea8f 1744 result->f.prot |= PAGE_EXEC;
9a12fb36
RH
1745 return false;
1746}
1747
fedbaa05 1748static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
7fa7ea8f 1749 int32_t address, uint8_t *prot)
7d2e08c9
RH
1750{
1751 if (!arm_feature(env, ARM_FEATURE_M)) {
1752 *prot = PAGE_READ | PAGE_WRITE;
1753 switch (address) {
1754 case 0xF0000000 ... 0xFFFFFFFF:
1755 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1756 /* hivecs execing is ok */
1757 *prot |= PAGE_EXEC;
1758 }
1759 break;
1760 case 0x00000000 ... 0x7FFFFFFF:
1761 *prot |= PAGE_EXEC;
1762 break;
1763 }
1764 } else {
1765 /* Default system address map for M profile cores.
1766 * The architecture specifies which regions are execute-never;
1767 * at the MPU level no other checks are defined.
1768 */
1769 switch (address) {
1770 case 0x00000000 ... 0x1fffffff: /* ROM */
1771 case 0x20000000 ... 0x3fffffff: /* SRAM */
1772 case 0x60000000 ... 0x7fffffff: /* RAM */
1773 case 0x80000000 ... 0x9fffffff: /* RAM */
1774 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1775 break;
1776 case 0x40000000 ... 0x5fffffff: /* Peripheral */
1777 case 0xa0000000 ... 0xbfffffff: /* Device */
1778 case 0xc0000000 ... 0xdfffffff: /* Device */
1779 case 0xe0000000 ... 0xffffffff: /* System */
1780 *prot = PAGE_READ | PAGE_WRITE;
1781 break;
1782 default:
1783 g_assert_not_reached();
1784 }
1785 }
1786}
1787
47ff5ba9
RH
1788static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1789{
1790 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1791 return arm_feature(env, ARM_FEATURE_M) &&
1792 extract32(address, 20, 12) == 0xe00;
1793}
1794
1795static bool m_is_system_region(CPUARMState *env, uint32_t address)
1796{
1797 /*
1798 * True if address is in the M profile system region
1799 * 0xe0000000 - 0xffffffff
1800 */
1801 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1802}
1803
c8e436c9 1804static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1a469cf7 1805 bool is_secure, bool is_user)
c8e436c9
RH
1806{
1807 /*
1808 * Return true if we should use the default memory map as a
1809 * "background" region if there are no hits against any MPU regions.
1810 */
1811 CPUARMState *env = &cpu->env;
1812
1813 if (is_user) {
1814 return false;
1815 }
1816
1817 if (arm_feature(env, ARM_FEATURE_M)) {
1a469cf7 1818 return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
c8e436c9 1819 }
fca45e34
TR
1820
1821 if (mmu_idx == ARMMMUIdx_Stage2) {
1822 return false;
1823 }
1824
1825 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
c8e436c9
RH
1826}
1827
1f2e87e5
RH
1828static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1829 MMUAccessType access_type, ARMMMUIdx mmu_idx,
957a0bb7 1830 bool secure, GetPhysAddrResult *result,
1f2e87e5
RH
1831 ARMMMUFaultInfo *fi)
1832{
1833 ARMCPU *cpu = env_archcpu(env);
1834 int n;
1835 bool is_user = regime_is_user(env, mmu_idx);
1836
7fa7ea8f
RH
1837 result->f.phys_addr = address;
1838 result->f.lg_page_size = TARGET_PAGE_BITS;
1839 result->f.prot = 0;
1f2e87e5 1840
7e80c0a4 1841 if (regime_translation_disabled(env, mmu_idx, secure) ||
1f2e87e5
RH
1842 m_is_ppb_region(env, address)) {
1843 /*
1844 * MPU disabled or M profile PPB access: use default memory map.
1845 * The other case which uses the default memory map in the
1846 * v7M ARM ARM pseudocode is exception vector reads from the vector
1847 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1848 * which always does a direct read using address_space_ldl(), rather
1849 * than going via this function, so we don't need to check that here.
1850 */
7fa7ea8f 1851 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1f2e87e5
RH
1852 } else { /* MPU enabled */
1853 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1854 /* region search */
1855 uint32_t base = env->pmsav7.drbar[n];
1856 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1857 uint32_t rmask;
1858 bool srdis = false;
1859
1860 if (!(env->pmsav7.drsr[n] & 0x1)) {
1861 continue;
1862 }
1863
1864 if (!rsize) {
1865 qemu_log_mask(LOG_GUEST_ERROR,
1866 "DRSR[%d]: Rsize field cannot be 0\n", n);
1867 continue;
1868 }
1869 rsize++;
1870 rmask = (1ull << rsize) - 1;
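/*
 * E.g. a DRSR.Rsize field of 11 gives rsize 12 and rmask 0xfff,
 * i.e. a 4KB region.
 */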
1871
1872 if (base & rmask) {
1873 qemu_log_mask(LOG_GUEST_ERROR,
1874 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1875 "to DRSR region size, mask = 0x%" PRIx32 "\n",
1876 n, base, rmask);
1877 continue;
1878 }
1879
1880 if (address < base || address > base + rmask) {
1881 /*
1882 * Address not in this region. We must check whether the
1883 * region covers addresses in the same page as our address.
1884 * In that case we must not report a size that covers the
1885 * whole page for a subsequent hit against a different MPU
1886 * region or the background region, because it would result in
1887 * incorrect TLB hits for subsequent accesses to addresses that
1888 * are in this MPU region.
1889 */
1890 if (ranges_overlap(base, rmask,
1891 address & TARGET_PAGE_MASK,
1892 TARGET_PAGE_SIZE)) {
7fa7ea8f 1893 result->f.lg_page_size = 0;
1f2e87e5
RH
1894 }
1895 continue;
1896 }
1897
1898 /* Region matched */
1899
1900 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1901 int i, snd;
1902 uint32_t srdis_mask;
1903
1904 rsize -= 3; /* subregion size (power of 2) */
1905 snd = ((address - base) >> rsize) & 0x7;
1906 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1907
1908 srdis_mask = srdis ? 0x3 : 0x0;
1909 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1910 /*
1911 * This will check in groups of 2, 4 and then 8, whether
1912 * the subregion bits are consistent. rsize is incremented
1913 * back up to give the region size, considering consistent
1914 * adjacent subregions as one region. Stop testing if rsize
1915 * is already big enough for an entire QEMU page.
1916 */
1917 int snd_rounded = snd & ~(i - 1);
1918 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1919 snd_rounded + 8, i);
1920 if (srdis_mask ^ srdis_multi) {
1921 break;
1922 }
1923 srdis_mask = (srdis_mask << i) | srdis_mask;
1924 rsize++;
1925 }
1926 }
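/*
 * E.g. in a 4KB region the eight subregions are 512 bytes each; if
 * the SRD bit for the subregion containing the address is set, the
 * region is treated as not matching. The loop above only widens the
 * page size reported to the TLB when adjacent subregions agree.
 */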
1927 if (srdis) {
1928 continue;
1929 }
1930 if (rsize < TARGET_PAGE_BITS) {
7fa7ea8f 1931 result->f.lg_page_size = rsize;
1f2e87e5
RH
1932 }
1933 break;
1934 }
1935
1936 if (n == -1) { /* no hits */
1a469cf7 1937 if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1f2e87e5
RH
1938 /* background fault */
1939 fi->type = ARMFault_Background;
1940 return true;
1941 }
7fa7ea8f
RH
1942 get_phys_addr_pmsav7_default(env, mmu_idx, address,
1943 &result->f.prot);
1f2e87e5
RH
1944 } else { /* an MPU hit! */
1945 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1946 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1947
1948 if (m_is_system_region(env, address)) {
1949 /* System space is always execute never */
1950 xn = 1;
1951 }
1952
1953 if (is_user) { /* User mode AP bit decoding */
1954 switch (ap) {
1955 case 0:
1956 case 1:
1957 case 5:
1958 break; /* no access */
1959 case 3:
7fa7ea8f 1960 result->f.prot |= PAGE_WRITE;
1f2e87e5
RH
1961 /* fall through */
1962 case 2:
1963 case 6:
7fa7ea8f 1964 result->f.prot |= PAGE_READ | PAGE_EXEC;
1f2e87e5
RH
1965 break;
1966 case 7:
1967 /* for v7M, same as 6; for R profile a reserved value */
1968 if (arm_feature(env, ARM_FEATURE_M)) {
7fa7ea8f 1969 result->f.prot |= PAGE_READ | PAGE_EXEC;
1f2e87e5
RH
1970 break;
1971 }
1972 /* fall through */
1973 default:
1974 qemu_log_mask(LOG_GUEST_ERROR,
1975 "DRACR[%d]: Bad value for AP bits: 0x%"
1976 PRIx32 "\n", n, ap);
1977 }
1978 } else { /* Priv. mode AP bits decoding */
1979 switch (ap) {
1980 case 0:
1981 break; /* no access */
1982 case 1:
1983 case 2:
1984 case 3:
7fa7ea8f 1985 result->f.prot |= PAGE_WRITE;
1f2e87e5
RH
1986 /* fall through */
1987 case 5:
1988 case 6:
7fa7ea8f 1989 result->f.prot |= PAGE_READ | PAGE_EXEC;
1f2e87e5
RH
1990 break;
1991 case 7:
1992 /* for v7M, same as 6; for R profile a reserved value */
1993 if (arm_feature(env, ARM_FEATURE_M)) {
7fa7ea8f 1994 result->f.prot |= PAGE_READ | PAGE_EXEC;
1f2e87e5
RH
1995 break;
1996 }
1997 /* fall through */
1998 default:
1999 qemu_log_mask(LOG_GUEST_ERROR,
2000 "DRACR[%d]: Bad value for AP bits: 0x%"
2001 PRIx32 "\n", n, ap);
2002 }
2003 }
2004
2005 /* execute never */
2006 if (xn) {
7fa7ea8f 2007 result->f.prot &= ~PAGE_EXEC;
1f2e87e5
RH
2008 }
2009 }
2010 }
2011
2012 fi->type = ARMFault_Permission;
2013 fi->level = 1;
7fa7ea8f 2014 return !(result->f.prot & (1 << access_type));
1f2e87e5
RH
2015}
2016
fca45e34
TR
2017static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
2018 uint32_t secure)
2019{
2020 if (regime_el(env, mmu_idx) == 2) {
2021 return env->pmsav8.hprbar;
2022 } else {
2023 return env->pmsav8.rbar[secure];
2024 }
2025}
2026
2027static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
2028 uint32_t secure)
2029{
2030 if (regime_el(env, mmu_idx) == 2) {
2031 return env->pmsav8.hprlar;
2032 } else {
2033 return env->pmsav8.rlar[secure];
2034 }
2035}
2036
fedbaa05
RH
2037bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
2038 MMUAccessType access_type, ARMMMUIdx mmu_idx,
e9fb7090
RH
2039 bool secure, GetPhysAddrResult *result,
2040 ARMMMUFaultInfo *fi, uint32_t *mregion)
fedbaa05
RH
2041{
2042 /*
2043 * Perform a PMSAv8 MPU lookup (without also doing the SAU check
2044 * that a full phys-to-virt translation does).
2045 * mregion is (if not NULL) set to the region number which matched,
2046 * or -1 if no region number is returned (MPU off, address did not
2047 * hit a region, address hit in multiple regions).
652c750e
RH
2048 * If the region hit doesn't cover the entire TARGET_PAGE the address
 2049 * is within, then we set the result lg_page_size to 0 to force the
2050 * memory system to use a subpage.
fedbaa05
RH
2051 */
2052 ARMCPU *cpu = env_archcpu(env);
2053 bool is_user = regime_is_user(env, mmu_idx);
fedbaa05
RH
2054 int n;
2055 int matchregion = -1;
2056 bool hit = false;
2057 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2058 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
fca45e34
TR
2059 int region_counter;
2060
2061 if (regime_el(env, mmu_idx) == 2) {
2062 region_counter = cpu->pmsav8r_hdregion;
2063 } else {
2064 region_counter = cpu->pmsav7_dregion;
2065 }
fedbaa05 2066
7fa7ea8f
RH
2067 result->f.lg_page_size = TARGET_PAGE_BITS;
2068 result->f.phys_addr = address;
2069 result->f.prot = 0;
fedbaa05
RH
2070 if (mregion) {
2071 *mregion = -1;
2072 }
2073
fca45e34
TR
2074 if (mmu_idx == ARMMMUIdx_Stage2) {
2075 fi->stage2 = true;
2076 }
2077
fedbaa05
RH
2078 /*
2079 * Unlike the ARM ARM pseudocode, we don't need to check whether this
2080 * was an exception vector read from the vector table (which is always
2081 * done using the default system address map), because those accesses
2082 * are done in arm_v7m_load_vector(), which always does a direct
2083 * read using address_space_ldl(), rather than going via this function.
2084 */
7e80c0a4 2085 if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
fedbaa05
RH
2086 hit = true;
2087 } else if (m_is_ppb_region(env, address)) {
2088 hit = true;
2089 } else {
1a469cf7 2090 if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
fedbaa05
RH
2091 hit = true;
2092 }
2093
fca45e34
TR
2094 uint32_t bitmask;
2095 if (arm_feature(env, ARM_FEATURE_M)) {
2096 bitmask = 0x1f;
2097 } else {
2098 bitmask = 0x3f;
2099 fi->level = 0;
2100 }
2101
2102 for (n = region_counter - 1; n >= 0; n--) {
fedbaa05
RH
2103 /* region search */
2104 /*
fca45e34
TR
2105 * Note that the base address is bits [31:x] from the register
2106 * with bits [x-1:0] all zeroes, but the limit address is bits
2107 * [31:x] from the register with bits [x:0] all ones. Where x is
 2108 * 5 for Cortex-M and 6 for Cortex-R.
fedbaa05 2109 */
fca45e34
TR
2110 uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
2111 uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;
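/*
 * E.g. for Cortex-M (x == 5), RBAR == 0x20000000 and RLAR == 0x20000fe1
 * describe an enabled region covering 0x20000000..0x20000fff.
 */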
fedbaa05 2112
fca45e34 2113 if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
fedbaa05
RH
2114 /* Region disabled */
2115 continue;
2116 }
2117
2118 if (address < base || address > limit) {
2119 /*
2120 * Address not in this region. We must check whether the
2121 * region covers addresses in the same page as our address.
2122 * In that case we must not report a size that covers the
2123 * whole page for a subsequent hit against a different MPU
2124 * region or the background region, because it would result in
2125 * incorrect TLB hits for subsequent accesses to addresses that
2126 * are in this MPU region.
2127 */
2128 if (limit >= base &&
2129 ranges_overlap(base, limit - base + 1,
2130 addr_page_base,
2131 TARGET_PAGE_SIZE)) {
7fa7ea8f 2132 result->f.lg_page_size = 0;
fedbaa05
RH
2133 }
2134 continue;
2135 }
2136
2137 if (base > addr_page_base || limit < addr_page_limit) {
7fa7ea8f 2138 result->f.lg_page_size = 0;
fedbaa05
RH
2139 }
2140
2141 if (matchregion != -1) {
2142 /*
2143 * Multiple regions match -- always a failure (unlike
2144 * PMSAv7 where highest-numbered-region wins)
2145 */
2146 fi->type = ARMFault_Permission;
fca45e34
TR
2147 if (arm_feature(env, ARM_FEATURE_M)) {
2148 fi->level = 1;
2149 }
fedbaa05
RH
2150 return true;
2151 }
2152
2153 matchregion = n;
2154 hit = true;
2155 }
2156 }
2157
2158 if (!hit) {
fca45e34
TR
2159 if (arm_feature(env, ARM_FEATURE_M)) {
2160 fi->type = ARMFault_Background;
2161 } else {
2162 fi->type = ARMFault_Permission;
2163 }
fedbaa05
RH
2164 return true;
2165 }
2166
2167 if (matchregion == -1) {
2168 /* hit using the background region */
7fa7ea8f 2169 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
fedbaa05 2170 } else {
fca45e34
TR
2171 uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
2172 uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
2173 uint32_t ap = extract32(matched_rbar, 1, 2);
2174 uint32_t xn = extract32(matched_rbar, 0, 1);
fedbaa05
RH
2175 bool pxn = false;
2176
2177 if (arm_feature(env, ARM_FEATURE_V8_1M)) {
fca45e34 2178 pxn = extract32(matched_rlar, 4, 1);
fedbaa05
RH
2179 }
2180
2181 if (m_is_system_region(env, address)) {
2182 /* System space is always execute never */
2183 xn = 1;
2184 }
2185
fca45e34
TR
2186 if (regime_el(env, mmu_idx) == 2) {
2187 result->f.prot = simple_ap_to_rw_prot_is_user(ap,
2188 mmu_idx != ARMMMUIdx_E2);
2189 } else {
2190 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
2191 }
2192
2193 if (!arm_feature(env, ARM_FEATURE_M)) {
2194 uint8_t attrindx = extract32(matched_rlar, 1, 3);
2195 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
2196 uint8_t sh = extract32(matched_rlar, 3, 2);
2197
2198 if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
2199 result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
2200 xn = 0x1;
2201 }
2202
2203 if ((regime_el(env, mmu_idx) == 1) &&
2204 regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
2205 pxn = 0x1;
2206 }
2207
2208 result->cacheattrs.is_s2_format = false;
2209 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
2210 result->cacheattrs.shareability = sh;
2211 }
2212
7fa7ea8f
RH
2213 if (result->f.prot && !xn && !(pxn && !is_user)) {
2214 result->f.prot |= PAGE_EXEC;
fedbaa05 2215 }
fca45e34 2216
fedbaa05
RH
2217 if (mregion) {
2218 *mregion = matchregion;
2219 }
2220 }
2221
2222 fi->type = ARMFault_Permission;
fca45e34
TR
2223 if (arm_feature(env, ARM_FEATURE_M)) {
2224 fi->level = 1;
2225 }
7fa7ea8f 2226 return !(result->f.prot & (1 << access_type));
fedbaa05
RH
2227}
2228
2c1f429d
RH
2229static bool v8m_is_sau_exempt(CPUARMState *env,
2230 uint32_t address, MMUAccessType access_type)
2231{
2232 /*
2233 * The architecture specifies that certain address ranges are
2234 * exempt from v8M SAU/IDAU checks.
2235 */
2236 return
2237 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
2238 (address >= 0xe0000000 && address <= 0xe0002fff) ||
2239 (address >= 0xe000e000 && address <= 0xe000efff) ||
2240 (address >= 0xe002e000 && address <= 0xe002efff) ||
2241 (address >= 0xe0040000 && address <= 0xe0041fff) ||
2242 (address >= 0xe00ff000 && address <= 0xe00fffff);
2243}
2244
2245void v8m_security_lookup(CPUARMState *env, uint32_t address,
dbf2a71a
RH
2246 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2247 bool is_secure, V8M_SAttributes *sattrs)
2c1f429d
RH
2248{
2249 /*
2250 * Look up the security attributes for this address. Compare the
2251 * pseudocode SecurityCheck() function.
2252 * We assume the caller has zero-initialized *sattrs.
2253 */
2254 ARMCPU *cpu = env_archcpu(env);
2255 int r;
2256 bool idau_exempt = false, idau_ns = true, idau_nsc = true;
2257 int idau_region = IREGION_NOTVALID;
2258 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2259 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2260
2261 if (cpu->idau) {
2262 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
2263 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
2264
2265 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
2266 &idau_nsc);
2267 }
2268
2269 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
2270 /* 0xf0000000..0xffffffff is always S for insn fetches */
2271 return;
2272 }
2273
2274 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
dbf2a71a 2275 sattrs->ns = !is_secure;
2c1f429d
RH
2276 return;
2277 }
2278
2279 if (idau_region != IREGION_NOTVALID) {
2280 sattrs->irvalid = true;
2281 sattrs->iregion = idau_region;
2282 }
2283
2284 switch (env->sau.ctrl & 3) {
2285 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
2286 break;
2287 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
2288 sattrs->ns = true;
2289 break;
2290 default: /* SAU.ENABLE == 1 */
2291 for (r = 0; r < cpu->sau_sregion; r++) {
2292 if (env->sau.rlar[r] & 1) {
2293 uint32_t base = env->sau.rbar[r] & ~0x1f;
2294 uint32_t limit = env->sau.rlar[r] | 0x1f;
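/*
 * SAU regions are 32-byte granular: SAU_RBAR/SAU_RLAR hold bits
 * [31:5] of the base and limit addresses respectively.
 */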
2295
2296 if (base <= address && limit >= address) {
2297 if (base > addr_page_base || limit < addr_page_limit) {
2298 sattrs->subpage = true;
2299 }
2300 if (sattrs->srvalid) {
2301 /*
2302 * If we hit in more than one region then we must report
2303 * as Secure, not NS-Callable, with no valid region
2304 * number info.
2305 */
2306 sattrs->ns = false;
2307 sattrs->nsc = false;
2308 sattrs->sregion = 0;
2309 sattrs->srvalid = false;
2310 break;
2311 } else {
2312 if (env->sau.rlar[r] & 2) {
2313 sattrs->nsc = true;
2314 } else {
2315 sattrs->ns = true;
2316 }
2317 sattrs->srvalid = true;
2318 sattrs->sregion = r;
2319 }
2320 } else {
2321 /*
2322 * Address not in this region. We must check whether the
2323 * region covers addresses in the same page as our address.
2324 * In that case we must not report a size that covers the
2325 * whole page for a subsequent hit against a different MPU
2326 * region or the background region, because it would result
2327 * in incorrect TLB hits for subsequent accesses to
2328 * addresses that are in this MPU region.
2329 */
2330 if (limit >= base &&
2331 ranges_overlap(base, limit - base + 1,
2332 addr_page_base,
2333 TARGET_PAGE_SIZE)) {
2334 sattrs->subpage = true;
2335 }
2336 }
2337 }
2338 }
2339 break;
2340 }
2341
2342 /*
2343 * The IDAU will override the SAU lookup results if it specifies
2344 * higher security than the SAU does.
2345 */
2346 if (!idau_ns) {
2347 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
2348 sattrs->ns = false;
2349 sattrs->nsc = idau_nsc;
2350 }
2351 }
2352}
2353
730d5c31
RH
2354static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
2355 MMUAccessType access_type, ARMMMUIdx mmu_idx,
be0ca948 2356 bool secure, GetPhysAddrResult *result,
730d5c31
RH
2357 ARMMMUFaultInfo *fi)
2358{
730d5c31
RH
2359 V8M_SAttributes sattrs = {};
2360 bool ret;
730d5c31
RH
2361
2362 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
dbf2a71a
RH
2363 v8m_security_lookup(env, address, access_type, mmu_idx,
2364 secure, &sattrs);
730d5c31
RH
2365 if (access_type == MMU_INST_FETCH) {
2366 /*
2367 * Instruction fetches always use the MMU bank and the
2368 * transaction attribute determined by the fetch address,
2369 * regardless of CPU state. This is painful for QEMU
2370 * to handle, because it would mean we need to encode
2371 * into the mmu_idx not just the (user, negpri) information
2372 * for the current security state but also that for the
2373 * other security state, which would balloon the number
2374 * of mmu_idx values needed alarmingly.
2375 * Fortunately we can avoid this because it's not actually
2376 * possible to arbitrarily execute code from memory with
2377 * the wrong security attribute: it will always generate
2378 * an exception of some kind or another, apart from the
2379 * special case of an NS CPU executing an SG instruction
2380 * in S&NSC memory. So we always just fail the translation
2381 * here and sort things out in the exception handler
2382 * (including possibly emulating an SG instruction).
2383 */
2384 if (sattrs.ns != !secure) {
2385 if (sattrs.nsc) {
2386 fi->type = ARMFault_QEMU_NSCExec;
2387 } else {
2388 fi->type = ARMFault_QEMU_SFault;
2389 }
7fa7ea8f
RH
2390 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2391 result->f.phys_addr = address;
2392 result->f.prot = 0;
730d5c31
RH
2393 return true;
2394 }
2395 } else {
2396 /*
2397 * For data accesses we always use the MMU bank indicated
2398 * by the current CPU state, but the security attributes
2399 * might downgrade a secure access to nonsecure.
2400 */
2401 if (sattrs.ns) {
7fa7ea8f 2402 result->f.attrs.secure = false;
730d5c31
RH
2403 } else if (!secure) {
2404 /*
2405 * NS access to S memory must fault.
2406 * Architecturally we should first check whether the
2407 * MPU information for this address indicates that we
2408 * are doing an unaligned access to Device memory, which
2409 * should generate a UsageFault instead. QEMU does not
2410 * currently check for that kind of unaligned access though.
2411 * If we added it we would need to do so as a special case
2412 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2413 */
2414 fi->type = ARMFault_QEMU_SFault;
7fa7ea8f
RH
2415 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2416 result->f.phys_addr = address;
2417 result->f.prot = 0;
730d5c31
RH
2418 return true;
2419 }
2420 }
2421 }
2422
e9fb7090 2423 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
652c750e
RH
2424 result, fi, NULL);
2425 if (sattrs.subpage) {
7fa7ea8f 2426 result->f.lg_page_size = 0;
652c750e 2427 }
730d5c31
RH
2428 return ret;
2429}
2430
966f4bb7
RH
2431/*
2432 * Translate from the 4-bit stage 2 representation of
2433 * memory attributes (without cache-allocation hints) to
2434 * the 8-bit representation of the stage 1 MAIR registers
2435 * (which includes allocation hints).
2436 *
2437 * ref: shared/translation/attrs/S2AttrDecode()
2438 * .../S2ConvertAttrsHints()
2439 */
ac76c2e5 2440static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
966f4bb7
RH
2441{
2442 uint8_t hiattr = extract32(s2attrs, 2, 2);
2443 uint8_t loattr = extract32(s2attrs, 0, 2);
2444 uint8_t hihint = 0, lohint = 0;
2445
2446 if (hiattr != 0) { /* normal memory */
ac76c2e5 2447 if (hcr & HCR_CD) { /* cache disabled */
966f4bb7
RH
2448 hiattr = loattr = 1; /* non-cacheable */
2449 } else {
2450 if (hiattr != 1) { /* Write-through or write-back */
2451 hihint = 3; /* RW allocate */
2452 }
2453 if (loattr != 1) { /* Write-through or write-back */
2454 lohint = 3; /* RW allocate */
2455 }
2456 }
2457 }
2458
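    /*
     * E.g. s2attrs == 0xf (Normal, Outer WB, Inner WB) becomes 0xff here
     * (RW-allocate hints added); with HCR_EL2.CD set it instead becomes
     * 0x44 (Normal Non-cacheable).
     */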
2459 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2460}
2461
2462/*
2463 * Combine either inner or outer cacheability attributes for normal
2464 * memory, according to table D4-42 and pseudocode procedure
2465 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2466 *
2467 * NB: only stage 1 includes allocation hints (RW bits), leading to
2468 * some asymmetry.
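 * For example, combining stage 1 Write-Back RW-allocate (0xf) with
 * stage 2 Write-Through (0x8) yields Write-Through with the stage 1
 * allocation hints (0xb).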
2469 */
2470static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2471{
2472 if (s1 == 4 || s2 == 4) {
2473 /* non-cacheable has precedence */
2474 return 4;
2475 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2476 /* stage 1 write-through takes precedence */
2477 return s1;
2478 } else if (extract32(s2, 2, 2) == 2) {
2479 /* stage 2 write-through takes precedence, but the allocation hint
2480 * is still taken from stage 1
2481 */
2482 return (2 << 2) | extract32(s1, 0, 2);
2483 } else { /* write-back */
2484 return s1;
2485 }
2486}
2487
2488/*
2489 * Combine the memory type and cacheability attributes of
2490 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2491 * combined attributes in MAIR_EL1 format.
2492 */
ac76c2e5 2493static uint8_t combined_attrs_nofwb(uint64_t hcr,
966f4bb7
RH
2494 ARMCacheAttrs s1, ARMCacheAttrs s2)
2495{
2496 uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2497
faa1451e
TR
2498 if (s2.is_s2_format) {
2499 s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2500 } else {
2501 s2_mair_attrs = s2.attrs;
2502 }
966f4bb7
RH
2503
2504 s1lo = extract32(s1.attrs, 0, 4);
2505 s2lo = extract32(s2_mair_attrs, 0, 4);
2506 s1hi = extract32(s1.attrs, 4, 4);
2507 s2hi = extract32(s2_mair_attrs, 4, 4);
2508
2509 /* Combine memory type and cacheability attributes */
2510 if (s1hi == 0 || s2hi == 0) {
2511 /* Device has precedence over normal */
2512 if (s1lo == 0 || s2lo == 0) {
2513 /* nGnRnE has precedence over anything */
2514 ret_attrs = 0;
2515 } else if (s1lo == 4 || s2lo == 4) {
2516 /* non-Reordering has precedence over Reordering */
2517 ret_attrs = 4; /* nGnRE */
2518 } else if (s1lo == 8 || s2lo == 8) {
2519 /* non-Gathering has precedence over Gathering */
2520 ret_attrs = 8; /* nGRE */
2521 } else {
2522 ret_attrs = 0xc; /* GRE */
2523 }
2524 } else { /* Normal memory */
2525 /* Outer/inner cacheability combine independently */
2526 ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2527 | combine_cacheattr_nibble(s1lo, s2lo);
2528 }
2529 return ret_attrs;
2530}
2531
2532static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2533{
2534 /*
2535 * Given the 4 bits specifying the outer or inner cacheability
2536 * in MAIR format, return a value specifying Normal Write-Back,
2537 * with the allocation and transient hints taken from the input
2538 * if the input specified some kind of cacheable attribute.
2539 */
2540 if (attr == 0 || attr == 4) {
2541 /*
2542 * 0 == an UNPREDICTABLE encoding
2543 * 4 == Non-cacheable
2544 * Either way, force Write-Back RW allocate non-transient
2545 */
2546 return 0xf;
2547 }
2548 /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2549 return attr | 4;
2550}
2551
2552/*
2553 * Combine the memory type and cacheability attributes of
2554 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2555 * combined attributes in MAIR_EL1 format.
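 * For example, an S2 attrs value of 6 forces Normal Write-Back while
 * keeping the S1 allocation hints: S1 attrs 0xaa (Normal WT,
 * Read-Allocate) becomes 0xee (Normal WB, Read-Allocate).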
2556 */
72cef09c 2557static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
966f4bb7 2558{
faa1451e
TR
2559 assert(s2.is_s2_format && !s1.is_s2_format);
2560
966f4bb7
RH
2561 switch (s2.attrs) {
2562 case 7:
2563 /* Use stage 1 attributes */
2564 return s1.attrs;
2565 case 6:
2566 /*
2567 * Force Normal Write-Back. Note that if S1 is Normal cacheable
2568 * then we take the allocation hints from it; otherwise it is
2569 * RW allocate, non-transient.
2570 */
2571 if ((s1.attrs & 0xf0) == 0) {
2572 /* S1 is Device */
2573 return 0xff;
2574 }
2575 /* Need to check the Inner and Outer nibbles separately */
2576 return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2577 force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2578 case 5:
2579 /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2580 if ((s1.attrs & 0xf0) == 0) {
2581 return s1.attrs;
2582 }
2583 return 0x44;
2584 case 0 ... 3:
2585 /* Force Device, of subtype specified by S2 */
2586 return s2.attrs << 2;
2587 default:
2588 /*
2589 * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2590 * arbitrarily force Device.
2591 */
2592 return 0;
2593 }
2594}
2595
2596/*
2597 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2598 * and CombineS1S2Desc()
2599 *
2600 * @env: CPUARMState
2601 * @s1: Attributes from stage 1 walk
2602 * @s2: Attributes from stage 2 walk
2603 */
ac76c2e5 2604static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
966f4bb7
RH
2605 ARMCacheAttrs s1, ARMCacheAttrs s2)
2606{
2607 ARMCacheAttrs ret;
2608 bool tagged = false;
2609
faa1451e 2610 assert(!s1.is_s2_format);
966f4bb7 2611 ret.is_s2_format = false;
8539dc00 2612 ret.guarded = s1.guarded;
966f4bb7
RH
2613
2614 if (s1.attrs == 0xf0) {
2615 tagged = true;
2616 s1.attrs = 0xff;
2617 }
2618
2619 /* Combine shareability attributes (table D4-43) */
2620 if (s1.shareability == 2 || s2.shareability == 2) {
2621 /* if either are outer-shareable, the result is outer-shareable */
2622 ret.shareability = 2;
2623 } else if (s1.shareability == 3 || s2.shareability == 3) {
2624 /* if either are inner-shareable, the result is inner-shareable */
2625 ret.shareability = 3;
2626 } else {
2627 /* both non-shareable */
2628 ret.shareability = 0;
2629 }
2630
2631 /* Combine memory type and cacheability attributes */
ac76c2e5 2632 if (hcr & HCR_FWB) {
72cef09c 2633 ret.attrs = combined_attrs_fwb(s1, s2);
966f4bb7 2634 } else {
ac76c2e5 2635 ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
966f4bb7
RH
2636 }
2637
2638 /*
2639 * Any location for which the resultant memory type is any
2640 * type of Device memory is always treated as Outer Shareable.
2641 * Any location for which the resultant memory type is Normal
2642 * Inner Non-cacheable, Outer Non-cacheable is always treated
2643 * as Outer Shareable.
2644 * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2645 */
2646 if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2647 ret.shareability = 2;
2648 }
2649
2650 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2651 if (tagged && ret.attrs == 0xff) {
2652 ret.attrs = 0xf0;
2653 }
2654
2655 return ret;
2656}
2657
448e42fd
RH
2658/*
2659 * MMU disabled. S1 addresses within aa64 translation regimes are
2660 * still checked for bounds -- see AArch64.S1DisabledOutput().
2661 */
2662static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2663 MMUAccessType access_type,
2664 ARMMMUIdx mmu_idx, bool is_secure,
2665 GetPhysAddrResult *result,
2666 ARMMMUFaultInfo *fi)
2667{
5b74f9b4
RH
2668 uint8_t memattr = 0x00; /* Device nGnRnE */
 2669 uint8_t shareability = 0; /* non-shareable */
a1ce3084 2670 int r_el;
448e42fd 2671
a1ce3084
RH
2672 switch (mmu_idx) {
2673 case ARMMMUIdx_Stage2:
2674 case ARMMMUIdx_Stage2_S:
2675 case ARMMMUIdx_Phys_NS:
2676 case ARMMMUIdx_Phys_S:
2677 break;
5b74f9b4 2678
a1ce3084
RH
2679 default:
2680 r_el = regime_el(env, mmu_idx);
448e42fd
RH
2681 if (arm_el_is_aa64(env, r_el)) {
2682 int pamax = arm_pamax(env_archcpu(env));
2683 uint64_t tcr = env->cp15.tcr_el[r_el];
2684 int addrtop, tbi;
2685
2686 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2687 if (access_type == MMU_INST_FETCH) {
2688 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2689 }
2690 tbi = (tbi >> extract64(address, 55, 1)) & 1;
2691 addrtop = (tbi ? 55 : 63);
2692
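            /*
             * E.g. with a 40-bit PAMax and TBI disabled for this address,
             * any of bits [63:40] being set produces an AddressSize fault.
             */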
2693 if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2694 fi->type = ARMFault_AddressSize;
2695 fi->level = 0;
2696 fi->stage2 = false;
 2697 return true;
2698 }
2699
2700 /*
2701 * When TBI is disabled, we've just validated that all of the
2702 * bits above PAMax are zero, so logically we only need to
2703 * clear the top byte for TBI. But it's clearer to follow
2704 * the pseudocode set of addrdesc.paddress.
2705 */
2706 address = extract64(address, 0, 52);
2707 }
5b74f9b4
RH
2708
2709 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2710 if (r_el == 1) {
2711 uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2712 if (hcr & HCR_DC) {
2713 if (hcr & HCR_DCT) {
2714 memattr = 0xf0; /* Tagged, Normal, WB, RWA */
2715 } else {
2716 memattr = 0xff; /* Normal, WB, RWA */
2717 }
2718 }
2719 }
2720 if (memattr == 0 && access_type == MMU_INST_FETCH) {
2721 if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2722 memattr = 0xee; /* Normal, WT, RA, NT */
2723 } else {
2724 memattr = 0x44; /* Normal, NC, No */
2725 }
 2726 shareability = 2; /* outer shareable */
2727 }
2728 result->cacheattrs.is_s2_format = false;
a1ce3084 2729 break;
448e42fd
RH
2730 }
2731
7fa7ea8f
RH
2732 result->f.phys_addr = address;
2733 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2734 result->f.lg_page_size = TARGET_PAGE_BITS;
5b74f9b4 2735 result->cacheattrs.shareability = shareability;
448e42fd 2736 result->cacheattrs.attrs = memattr;
6b72c542 2737 return false;
448e42fd
RH
2738}
2739
3f5a74c5
RH
2740static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
2741 target_ulong address,
2742 MMUAccessType access_type,
2743 GetPhysAddrResult *result,
2744 ARMMMUFaultInfo *fi)
2745{
2746 hwaddr ipa;
c8d6c286 2747 int s1_prot, s1_lgpgsz;
3f5a74c5 2748 bool is_secure = ptw->in_secure;
fcc0b041 2749 bool ret, ipa_secure;
3f5a74c5
RH
2750 ARMCacheAttrs cacheattrs1;
2751 bool is_el0;
2752 uint64_t hcr;
2753
2754 ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
2755
26ba00cf
PM
2756 /* If S1 fails, return early. */
2757 if (ret) {
3f5a74c5
RH
2758 return ret;
2759 }
2760
2761 ipa = result->f.phys_addr;
2762 ipa_secure = result->f.attrs.secure;
3f5a74c5
RH
2763
2764 is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
fcc0b041
PM
2765 ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2766 ptw->in_secure = ipa_secure;
2767 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
3f5a74c5
RH
2768
2769 /*
2770 * S1 is done, now do S2 translation.
2771 * Save the stage1 results so that we may merge prot and cacheattrs later.
2772 */
2773 s1_prot = result->f.prot;
c8d6c286 2774 s1_lgpgsz = result->f.lg_page_size;
3f5a74c5
RH
2775 cacheattrs1 = result->cacheattrs;
2776 memset(result, 0, sizeof(*result));
2777
fca45e34
TR
2778 if (arm_feature(env, ARM_FEATURE_PMSA)) {
2779 ret = get_phys_addr_pmsav8(env, ipa, access_type,
2780 ptw->in_mmu_idx, is_secure, result, fi);
2781 } else {
2782 ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
2783 is_el0, result, fi);
2784 }
3f5a74c5
RH
2785 fi->s2addr = ipa;
2786
2787 /* Combine the S1 and S2 perms. */
2788 result->f.prot &= s1_prot;
2789
2790 /* If S2 fails, return early. */
2791 if (ret) {
2792 return ret;
2793 }
2794
c8d6c286 2795 /*
9e65f4e6
PM
2796 * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
2797 * this means "don't put this in the TLB"; in this case, return a
2798 * result with lg_page_size == 0 to achieve that. Otherwise,
2799 * use the maximum of the S1 & S2 page size, so that invalidation
2800 * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
2801 * we know the combined result permissions etc only cover the minimum
2802 * of the S1 and S2 page size, because we know that the common TLB code
2803 * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
2804 * and passing a larger page size value only affects invalidations.)
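 * For example, a 2MB stage 1 block combined with 4KB stage 2 pages
 * reports lg_page_size == 21 rather than 12, even though each
 * underlying TLB entry still covers only TARGET_PAGE_SIZE.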
c8d6c286 2805 */
9e65f4e6
PM
2806 if (result->f.lg_page_size < TARGET_PAGE_BITS ||
2807 s1_lgpgsz < TARGET_PAGE_BITS) {
2808 result->f.lg_page_size = 0;
2809 } else if (result->f.lg_page_size < s1_lgpgsz) {
c8d6c286
RH
2810 result->f.lg_page_size = s1_lgpgsz;
2811 }
2812
3f5a74c5
RH
2813 /* Combine the S1 and S2 cache attributes. */
2814 hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2815 if (hcr & HCR_DC) {
2816 /*
2817 * HCR.DC forces the first stage attributes to
2818 * Normal Non-Shareable,
2819 * Inner Write-Back Read-Allocate Write-Allocate,
2820 * Outer Write-Back Read-Allocate Write-Allocate.
2821 * Do not overwrite Tagged within attrs.
2822 */
2823 if (cacheattrs1.attrs != 0xf0) {
2824 cacheattrs1.attrs = 0xff;
2825 }
2826 cacheattrs1.shareability = 0;
2827 }
2828 result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
2829 result->cacheattrs);
2830
2831 /*
2832 * Check if IPA translates to secure or non-secure PA space.
2833 * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
2834 */
2835 result->f.attrs.secure =
2836 (is_secure
2837 && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
2838 && (ipa_secure
2839 || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
2840
6b72c542 2841 return false;
3f5a74c5
RH
2842}
2843
4a358556
RH
2844static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
2845 target_ulong address,
2846 MMUAccessType access_type,
2847 GetPhysAddrResult *result,
2848 ARMMMUFaultInfo *fi)
8ae08860 2849{
4a358556 2850 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
4a358556 2851 bool is_secure = ptw->in_secure;
48da29e4 2852 ARMMMUIdx s1_mmu_idx;
8ae08860 2853
cead7fa4
RH
2854 /*
2855 * The page table entries may downgrade secure to non-secure, but
 2856 * cannot upgrade a non-secure translation regime's attributes
2857 * to secure.
2858 */
2859 result->f.attrs.secure = is_secure;
2860
48da29e4
RH
2861 switch (mmu_idx) {
2862 case ARMMMUIdx_Phys_S:
2863 case ARMMMUIdx_Phys_NS:
2864 /* Checking Phys early avoids special casing later vs regime_el. */
2865 return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2866 is_secure, result, fi);
2867
2868 case ARMMMUIdx_Stage1_E0:
2869 case ARMMMUIdx_Stage1_E1:
2870 case ARMMMUIdx_Stage1_E1_PAN:
2871 /* First stage lookup uses second stage for ptw. */
2872 ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2873 break;
2874
fcc0b041
PM
2875 case ARMMMUIdx_Stage2:
2876 case ARMMMUIdx_Stage2_S:
2877 /*
2878 * Second stage lookup uses physical for ptw; whether this is S or
2879 * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
2880 * the Secure EL2&0 regime.
2881 */
2882 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
2883 break;
2884
48da29e4
RH
2885 case ARMMMUIdx_E10_0:
2886 s1_mmu_idx = ARMMMUIdx_Stage1_E0;
2887 goto do_twostage;
2888 case ARMMMUIdx_E10_1:
2889 s1_mmu_idx = ARMMMUIdx_Stage1_E1;
2890 goto do_twostage;
2891 case ARMMMUIdx_E10_1_PAN:
2892 s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
2893 do_twostage:
8ae08860
RH
2894 /*
2895 * Call ourselves recursively to do the stage 1 and then stage 2
3f5a74c5
RH
2896 * translations if mmu_idx is a two-stage regime, and EL2 present.
2897 * Otherwise, a stage1+stage2 translation is just stage 1.
8ae08860 2898 */
3f5a74c5 2899 ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
26ba00cf
PM
2900 if (arm_feature(env, ARM_FEATURE_EL2) &&
2901 !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
3f5a74c5
RH
2902 return get_phys_addr_twostage(env, ptw, address, access_type,
2903 result, fi);
8ae08860 2904 }
48da29e4
RH
2905 /* fall through */
2906
2907 default:
fcc0b041 2908 /* Single stage uses physical for ptw. */
48da29e4
RH
2909 ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
2910 break;
8ae08860
RH
2911 }
2912
7fa7ea8f 2913 result->f.attrs.user = regime_is_user(env, mmu_idx);
8ae08860
RH
2914
2915 /*
2916 * Fast Context Switch Extension. This doesn't exist at all in v8.
2917 * In v7 and earlier it affects all stage 1 translations.
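     * E.g. with FCSEIDR.PID == 3 (register value 0x06000000), a VA of
     * 0x00001000 below the 32MB boundary becomes MVA 0x06001000.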
2918 */
2919 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2920 && !arm_feature(env, ARM_FEATURE_V8)) {
2921 if (regime_el(env, mmu_idx) == 3) {
2922 address += env->cp15.fcseidr_s;
2923 } else {
2924 address += env->cp15.fcseidr_ns;
2925 }
2926 }
2927
2928 if (arm_feature(env, ARM_FEATURE_PMSA)) {
2929 bool ret;
7fa7ea8f 2930 result->f.lg_page_size = TARGET_PAGE_BITS;
8ae08860
RH
2931
2932 if (arm_feature(env, ARM_FEATURE_V8)) {
2933 /* PMSAv8 */
2934 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
be0ca948 2935 is_secure, result, fi);
8ae08860
RH
2936 } else if (arm_feature(env, ARM_FEATURE_V7)) {
2937 /* PMSAv7 */
2938 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
957a0bb7 2939 is_secure, result, fi);
8ae08860
RH
2940 } else {
2941 /* Pre-v7 MPU */
2942 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
a5b5092f 2943 is_secure, result, fi);
8ae08860
RH
2944 }
2945 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2946 " mmu_idx %u -> %s (prot %c%c%c)\n",
2947 access_type == MMU_DATA_LOAD ? "reading" :
2948 (access_type == MMU_DATA_STORE ? "writing" : "execute"),
2949 (uint32_t)address, mmu_idx,
2950 ret ? "Miss" : "Hit",
7fa7ea8f
RH
2951 result->f.prot & PAGE_READ ? 'r' : '-',
2952 result->f.prot & PAGE_WRITE ? 'w' : '-',
2953 result->f.prot & PAGE_EXEC ? 'x' : '-');
8ae08860
RH
2954
2955 return ret;
2956 }
2957
2958 /* Definitely a real MMU, not an MPU */
2959
7e80c0a4 2960 if (regime_translation_disabled(env, mmu_idx, is_secure)) {
448e42fd
RH
2961 return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2962 is_secure, result, fi);
8ae08860 2963 }
6d2654ff 2964
8ae08860 2965 if (regime_using_lpae_format(env, mmu_idx)) {
4a358556 2966 return get_phys_addr_lpae(env, ptw, address, access_type, false,
6d2654ff 2967 result, fi);
6f2d9d74
TK
2968 } else if (arm_feature(env, ARM_FEATURE_V7) ||
2969 regime_sctlr(env, mmu_idx) & SCTLR_XP) {
4a358556 2970 return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
8ae08860 2971 } else {
4a358556 2972 return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
8ae08860
RH
2973 }
2974}
23971205 2975
4a358556
RH
2976bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
2977 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2978 bool is_secure, GetPhysAddrResult *result,
2979 ARMMMUFaultInfo *fi)
2980{
2981 S1Translate ptw = {
2982 .in_mmu_idx = mmu_idx,
2983 .in_secure = is_secure,
2984 };
2985 return get_phys_addr_with_struct(env, &ptw, address, access_type,
2986 result, fi);
2987}
2988
def8aa5b
RH
2989bool get_phys_addr(CPUARMState *env, target_ulong address,
2990 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2991 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2992{
03bea66e
RH
2993 bool is_secure;
2994
2995 switch (mmu_idx) {
2996 case ARMMMUIdx_E10_0:
2997 case ARMMMUIdx_E10_1:
2998 case ARMMMUIdx_E10_1_PAN:
2999 case ARMMMUIdx_E20_0:
3000 case ARMMMUIdx_E20_2:
3001 case ARMMMUIdx_E20_2_PAN:
3002 case ARMMMUIdx_Stage1_E0:
3003 case ARMMMUIdx_Stage1_E1:
3004 case ARMMMUIdx_Stage1_E1_PAN:
3005 case ARMMMUIdx_E2:
d902ae75
RH
3006 is_secure = arm_is_secure_below_el3(env);
3007 break;
03bea66e 3008 case ARMMMUIdx_Stage2:
a1ce3084 3009 case ARMMMUIdx_Phys_NS:
03bea66e
RH
3010 case ARMMMUIdx_MPrivNegPri:
3011 case ARMMMUIdx_MUserNegPri:
3012 case ARMMMUIdx_MPriv:
3013 case ARMMMUIdx_MUser:
3014 is_secure = false;
3015 break;
d902ae75 3016 case ARMMMUIdx_E3:
03bea66e 3017 case ARMMMUIdx_Stage2_S:
a1ce3084 3018 case ARMMMUIdx_Phys_S:
03bea66e
RH
3019 case ARMMMUIdx_MSPrivNegPri:
3020 case ARMMMUIdx_MSUserNegPri:
3021 case ARMMMUIdx_MSPriv:
3022 case ARMMMUIdx_MSUser:
3023 is_secure = true;
3024 break;
3025 default:
3026 g_assert_not_reached();
3027 }
def8aa5b 3028 return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
03bea66e 3029 is_secure, result, fi);
def8aa5b
RH
3030}
3031
23971205
RH
3032hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
3033 MemTxAttrs *attrs)
3034{
3035 ARMCPU *cpu = ARM_CPU(cs);
3036 CPUARMState *env = &cpu->env;
4a358556
RH
3037 S1Translate ptw = {
3038 .in_mmu_idx = arm_mmu_idx(env),
3039 .in_secure = arm_is_secure(env),
3040 .in_debug = true,
3041 };
de05a709 3042 GetPhysAddrResult res = {};
23971205 3043 ARMMMUFaultInfo fi = {};
de05a709 3044 bool ret;
23971205 3045
4a358556 3046 ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
7fa7ea8f 3047 *attrs = res.f.attrs;
23971205
RH
3048
3049 if (ret) {
3050 return -1;
3051 }
7fa7ea8f 3052 return res.f.phys_addr;
23971205 3053}