/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu.h"
#include "exec-all.h"
#include "gdbstub.h"
#include "qemu-common.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

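/*
 * Reduce an XTREG register description to the fields used in this file:
 * the target register number, its type and its group.
 */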
#define XTREG(idx, ofs, bi, sz, al, no, flags, cp, typ, grp, name, \
        a1, a2, a3, a4, a5, a6) \
    { .targno = (no), .type = (typ), .group = (grp) },

static const XtensaConfig core_config[0];

static void reset_mmu(CPUState *env);

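/*
 * Put the CPU into its architectural reset state: start execution at the
 * reset vector, clear the LITBASE enable bit, initialize PS and VECBASE,
 * clear the pending interrupt level and reset the MMU state.
 */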
void cpu_reset(CPUXtensaState *env)
{
    env->exception_taken = 0;
    env->pc = env->config->exception_vector[EXC_RESET];
    env->sregs[LITBASE] &= ~1;
    env->sregs[PS] = xtensa_option_enabled(env->config,
            XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
    env->sregs[VECBASE] = env->config->vecbase;

    env->pending_irq_level = 0;
    reset_mmu(env);
}


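/*
 * Create and initialize a CPU for the named core.
 * Returns NULL if no core configuration matches cpu_model.
 */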
CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
{
    static int tcg_inited;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(core_config); ++i) {
        if (strcmp(core_config[i].name, cpu_model) == 0) {
            config = core_config + i;
            break;
        }
    }

    if (config == NULL) {
        return NULL;
    }

    env = g_malloc0(sizeof(*env));
    env->config = config;
    cpu_exec_init(env);

    if (!tcg_inited) {
        tcg_inited = 1;
        xtensa_translate_init();
    }

    xtensa_irq_init(env);
    qemu_init_vcpu(env);
    return env;
}


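/* List the names of all available core configurations. */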
void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    int i;
    cpu_fprintf(f, "Available CPUs:\n");
    for (i = 0; i < ARRAY_SIZE(core_config); ++i) {
        cpu_fprintf(f, " %s\n", core_config[i].name);
    }
}

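/*
 * Translate a virtual address for debug accesses: try it first as a data
 * access, then as an instruction fetch; return ~0 if neither maps.
 */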
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(env, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

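/*
 * Return the effective address of a vector, applying the VECBASE relocation
 * when the relocatable vector option is enabled.
 */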
static uint32_t relocated_vector(CPUState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle a pending IRQ.
 * For a high priority interrupt jump to the corresponding interrupt vector.
 * For a level-1 interrupt convert it to either a user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

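/*
 * Deliver the pending exception or interrupt: for EXC_IRQ dispatch the
 * pending interrupt, then jump to the configured handler vector if one
 * exists and recheck pending interrupts.
 */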
void do_interrupt(CPUState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}

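/*
 * Invalidate every entry in every way of a full MMU TLB and mark all
 * entries as variable.
 */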
static void reset_tlb_mmu_all_ways(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

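/*
 * Reset TLB ways 5 and 6.  When these ways are fixed, install their static
 * mappings for the 0xd0000000/0xe0000000/0xf0000000 regions; when way 6 is
 * variable, identity-map the eight 512 MB regions.
 */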
static void reset_tlb_mmu_ways56(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 2;
        }
    }
}

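/*
 * Reset way 0 of a region protection TLB to identity mappings of the
 * eight 512 MB regions.
 */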
static void reset_tlb_region_way0(CPUState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

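/*
 * Reset MMU state.  With the full MMU option reset RASID, the TLB
 * configuration registers and all TLB ways; otherwise reset region
 * protection way 0.
 */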
static void reset_mmu(CPUState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

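/* Return the ring that RASID assigns to the given ASID, or 0xff if none. */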
static unsigned get_ring(const CPUState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
    if ((attr < 6 && attr != 3) || attr == 14) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    if (attr > 0 && attr < 6) {
        access |= PAGE_EXEC;
    }
    return access;
}

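/*
 * Check an access rights mask against the access type:
 * 0 = load, 1 = store, 2 = instruction fetch.
 */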
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring);

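/*
 * Translate a virtual address through the MMU TLBs.  On a TLB miss try to
 * autorefill the TLB from the page table, then check the privilege ring
 * and the access rights derived from the entry attributes.
 */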
static int get_physical_addr_mmu(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

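/*
 * Emulate TLB autorefill: translate the page table entry address derived
 * from PTEVADDR and the faulting vaddr, load the PTE and install it into
 * one of the four autorefill ways, selected round-robin.
 */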
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
            &paddr, &page_size, &access);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        uint32_t vpn;
        uint32_t pte = ldl_phys(paddr);

        *ring = (pte >> 4) & 0x3;
        *wi = (++env->autorefill_idx) & 0x3;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                __func__, vaddr, vpn, pte);
    }
    return ret;
}

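/*
 * Translate a virtual address with the region protection/translation
 * option: the way 0 entry is selected by the top three address bits.
 */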
static int get_physical_addr_region(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a page table walk and change the xtensa autorefill TLB
 * way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}