/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "ptw.h"

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
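    /*
     * The DACR holds sixteen 2-bit fields, one per domain: domain D's
     * value sits at bits [2D+1:2D], so e.g. domain 5 is decoded from
     * DACR bits [11:10].  Value 0 (no access) and the reserved value 2
     * take a domain fault below; 1 (client) means the AP bits are
     * checked; 3 (manager) bypasses permission checks entirely.
     */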
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
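        /*
         * Coarse tables are 1KB: 256 word-sized entries indexed by
         * VA[19:12], so "(address >> 10) & 0x3fc" is VA[19:12] * 4.
         * Fine tables are 4KB: 1024 entries indexed by VA[19:10],
         * hence "(address >> 8) & 0xffc".
         */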
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
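            /*
             * A large page carries four AP subfields, one per 16KB
             * quarter, at descriptor bits [5:4], [7:6], [9:8] and
             * [11:10].  VA bits [15:14] select the quarter, so the
             * shift below is 4 + 2 * VA[15:14].
             */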
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
 do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /*
         * Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
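            /*
             * A supersection maps 16MB and may point into a 40-bit
             * physical space: PA[31:24] comes from descriptor bits
             * [31:24], PA[35:32] from bits [23:20], and PA[39:36]
             * from bits [8:5].
             */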
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
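        /*
         * Assemble AP[2:0]: AP[1:0] live at section descriptor bits
         * [11:10] and APX (AP[2]) at bit 15, so e.g. APX = 1 with
         * AP[1:0] = 01 yields ap = 0b101, privileged read-only.
         */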
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
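            /*
             * With SCTLR.AFE set, AP[0] is the access flag and only
             * AP[2:1] encode permissions: 0b00 privileged RW, 0b01 RW
             * at any privilege, 0b10 privileged RO, 0b11 RO at any
             * privilege.
             */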
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
 do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
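        /*
         * Bits [5:1] of the region register hold the size exponent:
         * a field value of N selects a 2^(N+1)-byte region, so e.g.
         * N = 11 gives mask 0xfff (a 4KB region) and N = 31 covers
         * the whole 4GB address space.
         */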
        mask = 1 << ((base >> 1) & 0x1f);
        /*
         * Keep this shift separate from the above to avoid an
         * (undefined) << 32.
         */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                  int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /*
         * Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/**
 * get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            bool ipa_secure;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

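            /*
             * For a secure IPA, VSTCR_EL2.SW / VTCR_EL2.NSW select
             * whether the stage 2 walk itself is performed in the
             * secure or non-secure PA space; the final output-space
             * check using SA/NSA happens after the walk, below.
             */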
            ipa_secure = attrs->secure;
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
                } else {
                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
                }
            } else {
                assert(!ipa_secure);
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* If S2 fails, return early. */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cache attributes. */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);

            /* Check if IPA translates to secure or non-secure PA space. */
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
                          || (env->cp15.vstcr_el2.raw_tcr
                              & (VSTCR_SA | VSTCR_SW)));
                }
            }
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
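    /*
     * FCSE remaps VAs below 32MB by substituting the FCSEIDR PID into
     * VA[31:25]: e.g. with FCSEIDR = 0x04000000, VA 0x00123456 becomes
     * MVA 0x04123456 before the table walk.
     */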
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled. S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;
                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI. But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        cacheattrs->is_s2_format = false;
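        /*
         * The attrs byte uses the MAIR_ELx Attr<n> encoding: the high
         * nibble describes the outer cacheability and the low nibble
         * the inner, e.g. 0xff is Normal, Write-Back, Read/Write-
         * Allocate in both, while 0x00 is Device-nGnRnE.
         */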
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff;  /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00;      /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}