2 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the Open Source and Linux Lab nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "qemu-common.h"
32 #include "host-utils.h"
33 #if !defined(CONFIG_USER_ONLY)
34 #include "hw/loader.h"
37 static void reset_mmu(CPUState
*env
);
39 void cpu_reset(CPUXtensaState
*env
)
41 env
->exception_taken
= 0;
42 env
->pc
= env
->config
->exception_vector
[EXC_RESET
];
43 env
->sregs
[LITBASE
] &= ~1;
44 env
->sregs
[PS
] = xtensa_option_enabled(env
->config
,
45 XTENSA_OPTION_INTERRUPT
) ? 0x1f : 0x10;
46 env
->sregs
[VECBASE
] = env
->config
->vecbase
;
47 env
->sregs
[IBREAKENABLE
] = 0;
49 env
->pending_irq_level
= 0;
53 static struct XtensaConfigList
*xtensa_cores
;
55 void xtensa_register_core(XtensaConfigList
*node
)
57 node
->next
= xtensa_cores
;
61 static uint32_t check_hw_breakpoints(CPUState
*env
)
65 for (i
= 0; i
< env
->config
->ndbreak
; ++i
) {
66 if (env
->cpu_watchpoint
[i
] &&
67 env
->cpu_watchpoint
[i
]->flags
& BP_WATCHPOINT_HIT
) {
68 return DEBUGCAUSE_DB
| (i
<< DEBUGCAUSE_DBNUM_SHIFT
);
74 static CPUDebugExcpHandler
*prev_debug_excp_handler
;
76 static void breakpoint_handler(CPUState
*env
)
78 if (env
->watchpoint_hit
) {
79 if (env
->watchpoint_hit
->flags
& BP_CPU
) {
82 env
->watchpoint_hit
= NULL
;
83 cause
= check_hw_breakpoints(env
);
85 debug_exception_env(env
, cause
);
87 cpu_resume_from_signal(env
, NULL
);
90 if (prev_debug_excp_handler
) {
91 prev_debug_excp_handler(env
);
/*
 * Allocate and initialize a CPU state for the core model named by
 * cpu_model.  The visible code looks the name up in the registered core
 * list, zero-allocates the state, performs one-time debug-handler setup
 * and wires up the IRQ machinery.
 * NOTE(review): the extraction dropped several original lines (99, 102,
 * 106-112, 115-118, 120-121, 126-127, 129-132) -- presumably the 'env'
 * declaration, the loop 'break', the NULL return on unknown model, the
 * one-time TCG init guard and the final 'return env;'.  Verify against
 * the full file before relying on this listing.
 */
95 CPUXtensaState
*cpu_xtensa_init(const char *cpu_model
)
/* One-shot flags: translator and debug handler are initialized only once */
97 static int tcg_inited
;
98 static int debug_handler_inited
;
100 const XtensaConfig
*config
= NULL
;
101 XtensaConfigList
*core
= xtensa_cores
;
/* Linear search of the registered cores by name */
103 for (; core
; core
= core
->next
)
104 if (strcmp(core
->config
->name
, cpu_model
) == 0) {
105 config
= core
->config
;
109 if (config
== NULL
) {
/* Zero-initialized CPU state bound to the selected configuration */
113 env
= g_malloc0(sizeof(*env
));
114 env
->config
= config
;
119 xtensa_translate_init();
/* Install the watchpoint-to-debug-exception hook, chaining any previous one */
122 if (!debug_handler_inited
&& tcg_enabled()) {
123 debug_handler_inited
= 1;
124 prev_debug_excp_handler
=
125 cpu_set_debug_excp_handler(breakpoint_handler
);
128 xtensa_irq_init(env
);
134 void xtensa_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
136 XtensaConfigList
*core
= xtensa_cores
;
137 cpu_fprintf(f
, "Available CPUs:\n");
138 for (; core
; core
= core
->next
) {
139 cpu_fprintf(f
, " %s\n", core
->config
->name
);
143 target_phys_addr_t
cpu_get_phys_page_debug(CPUState
*env
, target_ulong addr
)
149 if (xtensa_get_physical_addr(env
, addr
, 0, 0,
150 &paddr
, &page_size
, &access
) == 0) {
153 if (xtensa_get_physical_addr(env
, addr
, 2, 0,
154 &paddr
, &page_size
, &access
) == 0) {
160 static uint32_t relocated_vector(CPUState
*env
, uint32_t vector
)
162 if (xtensa_option_enabled(env
->config
,
163 XTENSA_OPTION_RELOCATABLE_VECTOR
)) {
164 return vector
- env
->config
->vecbase
+ env
->sregs
[VECBASE
];
171 * Handle pending IRQ.
172 * For the high priority interrupt jump to the corresponding interrupt vector.
173 * For the level-1 interrupt convert it to either user, kernel or double
174 * exception with the 'level-1 interrupt' exception cause.
176 static void handle_interrupt(CPUState
*env
)
178 int level
= env
->pending_irq_level
;
180 if (level
> xtensa_get_cintlevel(env
) &&
181 level
<= env
->config
->nlevel
&&
182 (env
->config
->level_mask
[level
] &
184 env
->sregs
[INTENABLE
])) {
186 env
->sregs
[EPC1
+ level
- 1] = env
->pc
;
187 env
->sregs
[EPS2
+ level
- 2] = env
->sregs
[PS
];
189 (env
->sregs
[PS
] & ~PS_INTLEVEL
) | level
| PS_EXCM
;
190 env
->pc
= relocated_vector(env
,
191 env
->config
->interrupt_vector
[level
]);
193 env
->sregs
[EXCCAUSE
] = LEVEL1_INTERRUPT_CAUSE
;
195 if (env
->sregs
[PS
] & PS_EXCM
) {
196 if (env
->config
->ndepc
) {
197 env
->sregs
[DEPC
] = env
->pc
;
199 env
->sregs
[EPC1
] = env
->pc
;
201 env
->exception_index
= EXC_DOUBLE
;
203 env
->sregs
[EPC1
] = env
->pc
;
204 env
->exception_index
=
205 (env
->sregs
[PS
] & PS_UM
) ? EXC_USER
: EXC_KERNEL
;
207 env
->sregs
[PS
] |= PS_EXCM
;
209 env
->exception_taken
= 1;
/*
 * Architectural exception/interrupt dispatch.  For EXC_IRQ the pending
 * interrupt state is logged and converted via handle_interrupt(); the
 * switch then jumps to the configured (possibly relocated) exception
 * vector for window overflow/underflow and other exceptions, logging bad
 * or unknown exception_index values.  Ends by re-checking interrupts.
 * NOTE(review): extraction dropped original lines 214, 220, 224, 226-227,
 * 235-238, 247, 250-256 and 259-260 (braces, the trailing log argument
 * and the remaining switch cases/breaks) -- verify against the full file.
 */
213 void do_interrupt(CPUState
*env
)
/* Pending IRQ: log the interrupt state, then deliver it */
215 if (env
->exception_index
== EXC_IRQ
) {
216 qemu_log_mask(CPU_LOG_INT
,
217 "%s(EXC_IRQ) level = %d, cintlevel = %d, "
218 "pc = %08x, a0 = %08x, ps = %08x, "
219 "intset = %08x, intenable = %08x, "
221 __func__
, env
->pending_irq_level
, xtensa_get_cintlevel(env
),
222 env
->pc
, env
->regs
[0], env
->sregs
[PS
],
223 env
->sregs
[INTSET
], env
->sregs
[INTENABLE
],
225 handle_interrupt(env
);
/* Dispatch on the (possibly just-updated) exception index */
228 switch (env
->exception_index
) {
229 case EXC_WINDOW_OVERFLOW4
:
230 case EXC_WINDOW_UNDERFLOW4
:
231 case EXC_WINDOW_OVERFLOW8
:
232 case EXC_WINDOW_UNDERFLOW8
:
233 case EXC_WINDOW_OVERFLOW12
:
234 case EXC_WINDOW_UNDERFLOW12
:
239 qemu_log_mask(CPU_LOG_INT
, "%s(%d) "
240 "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
241 __func__
, env
->exception_index
,
242 env
->pc
, env
->regs
[0], env
->sregs
[PS
], env
->sregs
[CCOUNT
]);
/* Take the vector only when one is configured for this exception */
243 if (env
->config
->exception_vector
[env
->exception_index
]) {
244 env
->pc
= relocated_vector(env
,
245 env
->config
->exception_vector
[env
->exception_index
]);
246 env
->exception_taken
= 1;
248 qemu_log("%s(pc = %08x) bad exception_index: %d\n",
249 __func__
, env
->pc
, env
->exception_index
);
257 qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
258 __func__
, env
->pc
, env
->exception_index
);
/* Re-evaluate pending interrupts after dispatch */
261 check_interrupts(env
);
264 static void reset_tlb_mmu_all_ways(CPUState
*env
,
265 const xtensa_tlb
*tlb
, xtensa_tlb_entry entry
[][MAX_TLB_WAY_SIZE
])
269 for (wi
= 0; wi
< tlb
->nways
; ++wi
) {
270 for (ei
= 0; ei
< tlb
->way_size
[wi
]; ++ei
) {
271 entry
[wi
][ei
].asid
= 0;
272 entry
[wi
][ei
].variable
= true;
/*
 * Initialize the fixed MMU TLB ways 5 and 6 at reset.  When ways 5/6 are
 * not variable (varway56 unset) the static tables way5/way6 are copied
 * into the TLB; otherwise way 6 is filled with eight 512MB mappings
 * (vaddr == paddr == ei << 29, asid 1, attr 3).
 * NOTE(review): the initializer bodies of way5[]/way6[] (original lines
 * 282-310) and the else/closing braces were dropped by the extraction --
 * the actual fixed-mapping table contents are not visible here; verify
 * against the full file.
 */
277 static void reset_tlb_mmu_ways56(CPUState
*env
,
278 const xtensa_tlb
*tlb
, xtensa_tlb_entry entry
[][MAX_TLB_WAY_SIZE
])
280 if (!tlb
->varway56
) {
/* Fixed ways: copy the architectural way-5/way-6 tables */
281 static const xtensa_tlb_entry way5
[] = {
296 static const xtensa_tlb_entry way6
[] = {
311 memcpy(entry
[5], way5
, sizeof(way5
));
312 memcpy(entry
[6], way6
, sizeof(way6
));
/* Variable ways: identity-map way 6 in 512MB regions */
315 for (ei
= 0; ei
< 8; ++ei
) {
316 entry
[6][ei
].vaddr
= ei
<< 29;
317 entry
[6][ei
].paddr
= ei
<< 29;
318 entry
[6][ei
].asid
= 1;
319 entry
[6][ei
].attr
= 3;
324 static void reset_tlb_region_way0(CPUState
*env
,
325 xtensa_tlb_entry entry
[][MAX_TLB_WAY_SIZE
])
329 for (ei
= 0; ei
< 8; ++ei
) {
330 entry
[0][ei
].vaddr
= ei
<< 29;
331 entry
[0][ei
].paddr
= ei
<< 29;
332 entry
[0][ei
].asid
= 1;
333 entry
[0][ei
].attr
= 2;
334 entry
[0][ei
].variable
= true;
338 static void reset_mmu(CPUState
*env
)
340 if (xtensa_option_enabled(env
->config
, XTENSA_OPTION_MMU
)) {
341 env
->sregs
[RASID
] = 0x04030201;
342 env
->sregs
[ITLBCFG
] = 0;
343 env
->sregs
[DTLBCFG
] = 0;
344 env
->autorefill_idx
= 0;
345 reset_tlb_mmu_all_ways(env
, &env
->config
->itlb
, env
->itlb
);
346 reset_tlb_mmu_all_ways(env
, &env
->config
->dtlb
, env
->dtlb
);
347 reset_tlb_mmu_ways56(env
, &env
->config
->itlb
, env
->itlb
);
348 reset_tlb_mmu_ways56(env
, &env
->config
->dtlb
, env
->dtlb
);
350 reset_tlb_region_way0(env
, env
->itlb
);
351 reset_tlb_region_way0(env
, env
->dtlb
);
355 static unsigned get_ring(const CPUState
*env
, uint8_t asid
)
358 for (i
= 0; i
< 4; ++i
) {
359 if (((env
->sregs
[RASID
] >> i
* 8) & 0xff) == asid
) {
367 * Lookup xtensa TLB for the given virtual address.
370 * \param pwi: [out] way index
371 * \param pei: [out] entry index
372 * \param pring: [out] access ring
373 * \return 0 if ok, exception cause code otherwise
/*
 * Look up ADDR in the instruction or data TLB (selected by dtlb),
 * scanning every way.  On a hit, report the way index (*pwi), entry
 * index (*pei) and the access ring derived from the entry's ASID
 * (*pring); otherwise return the multi-hit or miss exception cause code.
 * NOTE(review): extraction dropped original lines 377, 382-385, 387-388,
 * 392-394 and 397-404 (local declarations, hit counting, the out-param
 * stores and both return paths) -- verify against the full file.
 */
375 int xtensa_tlb_lookup(const CPUState
*env
, uint32_t addr
, bool dtlb
,
376 uint32_t *pwi
, uint32_t *pei
, uint8_t *pring
)
/* Select the config and entry arrays for the requested TLB */
378 const xtensa_tlb
*tlb
= dtlb
?
379 &env
->config
->dtlb
: &env
->config
->itlb
;
380 const xtensa_tlb_entry (*entry
)[MAX_TLB_WAY_SIZE
] = dtlb
?
381 env
->dtlb
: env
->itlb
;
/* Probe each way at the entry index determined by the address split */
386 for (wi
= 0; wi
< tlb
->nways
; ++wi
) {
389 split_tlb_entry_spec_way(env
, addr
, dtlb
, &vpn
, wi
, &ei
);
/* Valid entry (nonzero ASID) matching the VPN */
390 if (entry
[wi
][ei
].vaddr
== vpn
&& entry
[wi
][ei
].asid
) {
391 unsigned ring
= get_ring(env
, entry
[wi
][ei
].asid
);
/* More than one match: multi-hit cause for this TLB kind */
395 LOAD_STORE_TLB_MULTI_HIT_CAUSE
:
396 INST_TLB_MULTI_HIT_CAUSE
;
/* No match: miss cause for this TLB kind */
405 (dtlb
? LOAD_STORE_TLB_MISS_CAUSE
: INST_TLB_MISS_CAUSE
);
409 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
412 static unsigned mmu_attr_to_access(uint32_t attr
)
421 access
|= PAGE_WRITE
;
423 } else if (attr
== 13) {
424 access
|= PAGE_READ
| PAGE_WRITE
;
430 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
433 static unsigned region_attr_to_access(uint32_t attr
)
436 if ((attr
< 6 && attr
!= 3) || attr
== 14) {
437 access
|= PAGE_READ
| PAGE_WRITE
;
439 if (attr
> 0 && attr
< 6) {
445 static bool is_access_granted(unsigned access
, int is_write
)
449 return access
& PAGE_READ
;
452 return access
& PAGE_WRITE
;
455 return access
& PAGE_EXEC
;
462 static int autorefill_mmu(CPUState
*env
, uint32_t vaddr
, bool dtlb
,
463 uint32_t *wi
, uint32_t *ei
, uint8_t *ring
);
/*
 * Translate VADDR through the MMU TLBs.  A TLB miss may be satisfied by a
 * hardware pagewalk (autorefill_mmu), except for a ring-0 access inside
 * the 4MB PTEVADDR window (the XOR/mask check), which must not recurse.
 * On success fills *paddr/*page_size/*access; otherwise returns the
 * privilege, prohibited, miss or multi-hit exception cause.
 * NOTE(review): extraction dropped original lines 468, 470-472, 474,
 * 478-483, 486, 488, 491-492, 495-496, 500-501 and 504-505 (braces,
 * local declarations and the return statements) -- verify against the
 * full file.
 */
465 static int get_physical_addr_mmu(CPUState
*env
,
466 uint32_t vaddr
, int is_write
, int mmu_idx
,
467 uint32_t *paddr
, uint32_t *page_size
, unsigned *access
)
/* Instruction fetches (is_write == 2) use the ITLB, everything else DTLB */
469 bool dtlb
= is_write
!= 2;
473 int ret
= xtensa_tlb_lookup(env
, vaddr
, dtlb
, &wi
, &ei
, &ring
);
/* On a miss, try a hardware pagewalk unless translating the PTE window at ring 0 */
475 if ((ret
== INST_TLB_MISS_CAUSE
|| ret
== LOAD_STORE_TLB_MISS_CAUSE
) &&
476 (mmu_idx
!= 0 || ((vaddr
^ env
->sregs
[PTEVADDR
]) & 0xffc00000)) &&
477 autorefill_mmu(env
, vaddr
, dtlb
, &wi
, &ei
, &ring
) == 0) {
484 const xtensa_tlb_entry
*entry
=
485 xtensa_tlb_get_entry(env
, dtlb
, wi
, ei
);
/* Ring check: entry ring must be at least as privileged as mmu_idx */
487 if (ring
< mmu_idx
) {
489 LOAD_STORE_PRIVILEGE_CAUSE
:
490 INST_FETCH_PRIVILEGE_CAUSE
;
/* Access-rights check against the decoded page attributes */
493 *access
= mmu_attr_to_access(entry
->attr
);
494 if (!is_access_granted(*access
, is_write
)) {
497 STORE_PROHIBITED_CAUSE
:
498 LOAD_PROHIBITED_CAUSE
) :
499 INST_FETCH_PROHIBITED_CAUSE
;
/* Compose the physical address from the entry and the page offset */
502 *paddr
= entry
->paddr
| (vaddr
& ~xtensa_tlb_get_addr_mask(env
, dtlb
, wi
));
503 *page_size
= ~xtensa_tlb_get_addr_mask(env
, dtlb
, wi
) + 1;
/*
 * Hardware TLB autorefill: compute the PTE address from PTEVADDR and the
 * faulting VADDR, translate and load the PTE, then install it into one of
 * the four autorefill ways (round-robin via autorefill_idx), deriving the
 * access ring from PTE bits 5:4.
 * NOTE(review): extraction dropped original lines 510-514, 518, 521-523,
 * 525 and 532-534 (local declarations, the pt_vaddr assignment head, the
 * failure path and the return) -- verify against the full file.
 */
508 static int autorefill_mmu(CPUState
*env
, uint32_t vaddr
, bool dtlb
,
509 uint32_t *wi
, uint32_t *ei
, uint8_t *ring
)
/* PTE address: page-table base from PTEVADDR plus vaddr's VPN index */
515 (env
->sregs
[PTEVADDR
] | (vaddr
>> 10)) & 0xfffffffc;
516 int ret
= get_physical_addr_mmu(env
, pt_vaddr
, 0, 0,
517 &paddr
, &page_size
, &access
);
519 qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__
,
520 vaddr
, ret
? ~0 : paddr
);
/* Load the PTE and install it into the next autorefill way */
524 uint32_t pte
= ldl_phys(paddr
);
526 *ring
= (pte
>> 4) & 0x3;
527 *wi
= (++env
->autorefill_idx
) & 0x3;
528 split_tlb_entry_spec_way(env
, vaddr
, dtlb
, &vpn
, *wi
, ei
);
529 xtensa_tlb_set_entry(env
, dtlb
, *wi
, *ei
, vpn
, pte
);
530 qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
531 __func__
, vaddr
, vpn
, pte
);
536 static int get_physical_addr_region(CPUState
*env
,
537 uint32_t vaddr
, int is_write
, int mmu_idx
,
538 uint32_t *paddr
, uint32_t *page_size
, unsigned *access
)
540 bool dtlb
= is_write
!= 2;
542 uint32_t ei
= (vaddr
>> 29) & 0x7;
543 const xtensa_tlb_entry
*entry
=
544 xtensa_tlb_get_entry(env
, dtlb
, wi
, ei
);
546 *access
= region_attr_to_access(entry
->attr
);
547 if (!is_access_granted(*access
, is_write
)) {
550 STORE_PROHIBITED_CAUSE
:
551 LOAD_PROHIBITED_CAUSE
) :
552 INST_FETCH_PROHIBITED_CAUSE
;
555 *paddr
= entry
->paddr
| (vaddr
& ~REGION_PAGE_MASK
);
556 *page_size
= ~REGION_PAGE_MASK
+ 1;
562 * Convert virtual address to physical addr.
563 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
565 * \return 0 if ok, exception cause code otherwise
567 int xtensa_get_physical_addr(CPUState
*env
,
568 uint32_t vaddr
, int is_write
, int mmu_idx
,
569 uint32_t *paddr
, uint32_t *page_size
, unsigned *access
)
571 if (xtensa_option_enabled(env
->config
, XTENSA_OPTION_MMU
)) {
572 return get_physical_addr_mmu(env
, vaddr
, is_write
, mmu_idx
,
573 paddr
, page_size
, access
);
574 } else if (xtensa_option_bits_enabled(env
->config
,
575 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION
) |
576 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION
))) {
577 return get_physical_addr_region(env
, vaddr
, is_write
, mmu_idx
,
578 paddr
, page_size
, access
);
581 *page_size
= TARGET_PAGE_SIZE
;
582 *access
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
/*
 * Pretty-print one TLB (instruction or data, selected by dtlb) to F.
 * Selects the attr decoder matching the core's translation option, walks
 * every way/entry, and prints a per-way header (way number, entry size)
 * followed by one line per valid entry with its R/W/X decoding.
 * NOTE(review): extraction dropped original lines 589-590, 596, 599, 601,
 * 603-609, 613-614, 616-617, 620, 623-624, 626-629 and 633-636 (local
 * declarations, the KB/MB size-text selection, the valid-entry filter and
 * the vaddr/paddr/asid/attr print arguments) -- verify against the full
 * file.
 */
587 static void dump_tlb(FILE *f
, fprintf_function cpu_fprintf
,
588 CPUState
*env
, bool dtlb
)
/* Configuration of the TLB being dumped */
591 const xtensa_tlb
*conf
=
592 dtlb
? &env
->config
->dtlb
: &env
->config
->itlb
;
/* Pick the attr decoder matching the translation option */
593 unsigned (*attr_to_access
)(uint32_t) =
594 xtensa_option_enabled(env
->config
, XTENSA_OPTION_MMU
) ?
595 mmu_attr_to_access
: region_attr_to_access
;
597 for (wi
= 0; wi
< conf
->nways
; ++wi
) {
/* Entry size for this way, derived from its address mask */
598 uint32_t sz
= ~xtensa_tlb_get_addr_mask(env
, dtlb
, wi
) + 1;
600 bool print_header
= true;
602 if (sz
>= 0x100000) {
610 for (ei
= 0; ei
< conf
->way_size
[wi
]; ++ei
) {
611 const xtensa_tlb_entry
*entry
=
612 xtensa_tlb_get_entry(env
, dtlb
, wi
, ei
);
615 unsigned access
= attr_to_access(entry
->attr
);
/* Print the way header once, before its first entry line */
618 print_header
= false;
619 cpu_fprintf(f
, "Way %u (%d %s)\n", wi
, sz
, sz_text
);
621 "\tVaddr Paddr ASID Attr RWX\n"
622 "\t---------- ---------- ---- ---- ---\n");
625 "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c\n",
630 (access
& PAGE_READ
) ? 'R' : '-',
631 (access
& PAGE_WRITE
) ? 'W' : '-',
632 (access
& PAGE_EXEC
) ? 'X' : '-');
638 void dump_mmu(FILE *f
, fprintf_function cpu_fprintf
, CPUState
*env
)
640 if (xtensa_option_bits_enabled(env
->config
,
641 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION
) |
642 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION
) |
643 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU
))) {
645 cpu_fprintf(f
, "ITLB:\n");
646 dump_tlb(f
, cpu_fprintf
, env
, false);
647 cpu_fprintf(f
, "\nDTLB:\n");
648 dump_tlb(f
, cpu_fprintf
, env
, true);
650 cpu_fprintf(f
, "No TLB for this CPU core\n");