/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "cpu.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
static void reset_mmu(CPUState *env);
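/*
 * Note on the reset state below: execution restarts at the configured
 * reset vector, and PS is set up so that exceptions are masked (PS.EXCM)
 * right after reset; when the interrupt option is present the interrupt
 * level is raised as well. This reading of the 0x1f/0x10 constants assumes
 * the usual PS layout with INTLEVEL in bits 3..0 and EXCM in bit 4.
 */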
void cpu_reset(CPUXtensaState *env)
{
    env->exception_taken = 0;
    env->pc = env->config->exception_vector[EXC_RESET];
    env->sregs[LITBASE] &= ~1;
    env->sregs[PS] = xtensa_option_enabled(env->config,
            XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
    env->sregs[VECBASE] = env->config->vecbase;

    env->pending_irq_level = 0;
    reset_mmu(env);
}
static struct XtensaConfigList *xtensa_cores;
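/*
 * Core configurations register themselves here, building a simple singly
 * linked list that cpu_xtensa_init() and xtensa_cpu_list() walk by name.
 */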
void xtensa_register_core(XtensaConfigList *node)
{
    node->next = xtensa_cores;
    xtensa_cores = node;
}
CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
{
    static int tcg_inited;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    XtensaConfigList *core = xtensa_cores;

    for (; core; core = core->next) {
        if (strcmp(core->config->name, cpu_model) == 0) {
            config = core->config;
            break;
        }
    }

    if (config == NULL) {
        return NULL;
    }

    env = g_malloc0(sizeof(*env));
    env->config = config;
    cpu_exec_init(env);

    if (!tcg_inited) {
        tcg_inited = 1;
        xtensa_translate_init();
    }

    qemu_init_vcpu(env);
    return env;
}
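/*
 * A machine model is expected to bring the CPU up by core name, e.g.
 * (sketch only; the core name is only an example):
 *
 *     CPUXtensaState *env = cpu_xtensa_init("dc232b");
 *     if (env == NULL) {
 *         hw_error("Unable to find CPU definition\n");
 *     }
 */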
void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}
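/*
 * The debug accessor below tries the translation twice: first as a data
 * access (is_write == 0) and, if that is refused, as an instruction fetch
 * (is_write == 2), so that execute-only mappings still resolve for the
 * debugger.
 */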
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(env, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}
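/*
 * With the relocatable vector option the vectors are not at their
 * configured (build-time) addresses but at the same offsets from the
 * run-time VECBASE special register, hence the rebasing below.
 */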
static uint32_t relocated_vector(CPUState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}
/*!
 * Handle a pending IRQ.
 * For a high-priority interrupt, jump to the corresponding interrupt vector.
 * For a level-1 interrupt, convert it to either a user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}
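/*
 * do_interrupt() first turns a pending EXC_IRQ into either a jump to a
 * high-priority interrupt vector or a level-1 interrupt exception (see
 * handle_interrupt() above), then dispatches whatever exception_index is
 * left through the configured exception vectors.
 */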
void do_interrupt(CPUState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}
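/*
 * TLB state is modelled as entry[way][index]; an entry with asid == 0 is
 * treated as invalid by the lookup code, so clearing asid here is what
 * actually invalidates a way on reset.
 */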
static void reset_tlb_mmu_all_ways(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}
static void reset_tlb_mmu_ways56(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Fixed ways 5 and 6: KSEG cached/bypass and KIO cached/bypass */
        static const xtensa_tlb_entry way5[] = {
            { .vaddr = 0xd0000000, .paddr = 0, .asid = 1, .attr = 7 },
            { .vaddr = 0xd8000000, .paddr = 0, .asid = 1, .attr = 3 },
        };
        static const xtensa_tlb_entry way6[] = {
            { .vaddr = 0xe0000000, .paddr = 0xf0000000, .asid = 1, .attr = 7 },
            { .vaddr = 0xf0000000, .paddr = 0xf0000000, .asid = 1, .attr = 3 },
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        unsigned ei;

        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
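/*
 * Region protection splits the 4GB address space into eight 512MB
 * regions (hence the ei << 29 below), each identity-mapped with a
 * default attribute.
 */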
static void reset_tlb_region_way0(CPUState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}
static void reset_mmu(CPUState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}
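/*
 * RASID packs four 8-bit ASIDs, one per ring, with ring 0 in the least
 * significant byte. With the reset value 0x04030201 set above, an entry
 * with asid == 1 belongs to ring 0 and asid == 4 to ring 3; get_ring()
 * returns an out-of-range value when no ring matches.
 */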
static unsigned get_ring(const CPUState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}
/*!
 * Look up the xtensa TLB for the given virtual address.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;

        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    return access;
}
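/*
 * For example, an entry with attr == 13 yields PAGE_READ | PAGE_WRITE but
 * no PAGE_EXEC, so an instruction fetch from such a page fails
 * is_access_granted() and raises the fetch-prohibited cause.
 */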
/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if ((attr < 6 && attr != 3) || attr == 14) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    if (attr > 0 && attr < 6) {
        access |= PAGE_EXEC;
    }
    return access;
}
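/*
 * For example, attr == 2 (the value installed by reset_tlb_region_way0())
 * satisfies both conditions above and therefore grants
 * PAGE_READ | PAGE_WRITE | PAGE_EXEC, i.e. full access to the region.
 */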
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring);
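/*
 * MMU translation below proceeds in three steps: TLB lookup, an optional
 * hardware-style autorefill on a miss (skipped for mmu_idx 0 accesses that
 * fall into the same 4MB region as PTEVADDR, so the page table area itself
 * still reports a miss to the guest), then ring and attribute permission
 * checks against the matched entry.
 */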
static int get_physical_addr_mmu(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
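/*
 * The page table walk address computed below follows from one 4-byte PTE
 * per 4KB page (an assumption about the usual xtensa MMU layout): the PTE
 * for vaddr lives at PTEVADDR + (vaddr >> 12) * 4, which, with PTEVADDR
 * aligned to 4MB, is exactly (PTEVADDR | (vaddr >> 10)) & ~3.
 */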
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
            &paddr, &page_size, &access);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        uint32_t vpn;
        uint32_t pte = ldl_phys(paddr);

        *ring = (pte >> 4) & 0x3;
        *wi = (++env->autorefill_idx) & 0x3;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                __func__, vaddr, vpn, pte);
    }
    return ret;
}
static int get_physical_addr_region(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}
/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a pagewalk and change the xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}
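/*
 * A caller in the softmmu path (e.g. tlb_fill()) is expected to use the
 * result roughly like this (sketch, not the actual op_helper.c code):
 *
 *     uint32_t paddr, page_size;
 *     unsigned access;
 *     int ret = xtensa_get_physical_addr(env, vaddr, is_write, mmu_idx,
 *             &paddr, &page_size, &access);
 *     if (ret == 0) {
 *         tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
 *                 paddr & TARGET_PAGE_MASK, access, mmu_idx, page_size);
 *     } else {
 *         ... raise the exception with cause code 'ret' ...
 *     }
 */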