/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu.h"
#include "exec-all.h"
#include "gdbstub.h"
#include "qemu-common.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static void reset_mmu(CPUState *env);

void cpu_reset(CPUXtensaState *env)
{
    env->exception_taken = 0;
    env->pc = env->config->exception_vector[EXC_RESET];
    env->sregs[LITBASE] &= ~1;
    env->sregs[PS] = xtensa_option_enabled(env->config,
            XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
    env->sregs[VECBASE] = env->config->vecbase;
    env->sregs[IBREAKENABLE] = 0;

    env->pending_irq_level = 0;
    reset_mmu(env);
}

static struct XtensaConfigList *xtensa_cores;

void xtensa_register_core(XtensaConfigList *node)
{
    node->next = xtensa_cores;
    xtensa_cores = node;
}

static uint32_t check_hw_breakpoints(CPUState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
                env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

static void breakpoint_handler(CPUState *env)
{
    if (env->watchpoint_hit) {
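        /*
         * A BP_CPU watchpoint stands for one of the core's DBREAK data
         * breakpoints: turn the hit into a debug exception with the
         * matching DEBUGCAUSE value.
         */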
        if (env->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            env->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            cpu_resume_from_signal(env, NULL);
        }
    }
    if (prev_debug_excp_handler) {
        prev_debug_excp_handler(env);
    }
}

CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
{
    static int tcg_inited;
    static int debug_handler_inited;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    XtensaConfigList *core = xtensa_cores;

    for (; core; core = core->next) {
        if (strcmp(core->config->name, cpu_model) == 0) {
            config = core->config;
            break;
        }
    }

    if (config == NULL) {
        return NULL;
    }

    env = g_malloc0(sizeof(*env));
    env->config = config;
    cpu_exec_init(env);

    if (!tcg_inited) {
        tcg_inited = 1;
        xtensa_translate_init();
    }

    if (!debug_handler_inited && tcg_enabled()) {
        debug_handler_inited = 1;
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
    }

    xtensa_irq_init(env);
    qemu_init_vcpu(env);
    return env;
}


void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

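    /*
     * Try the address first as a data access (is_write == 0), then as an
     * instruction fetch (is_write == 2): either mapping gives the debugger
     * the physical page.
     */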
    if (xtensa_get_physical_addr(env, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static uint32_t relocated_vector(CPUState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle a pending IRQ.
 * For a high-priority interrupt, jump to the corresponding interrupt vector.
 * For a level-1 interrupt, convert it to either a user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
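            /*
             * High-priority interrupt: save the return state in this
             * level's EPC/EPS registers (EPC1 + level - 1 and
             * EPS2 + level - 2) and jump to the level's interrupt vector
             * with PS.EXCM set.
             */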
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

void do_interrupt(CPUState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}

static void reset_tlb_mmu_all_ways(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
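        /*
         * Without the variable way 5/6 option, ways 5 and 6 reset to the
         * fixed static mappings: way 5 maps virtual 0xd0000000 and
         * 0xd8000000 to physical 0, way 6 maps 0xe0000000 and 0xf0000000
         * to physical 0xf0000000; the paired entries differ only in their
         * attr value.
         */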
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

static void reset_mmu(CPUState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

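/*
 * RASID packs four 8-bit ASIDs, one per protection ring, with ring 0 in
 * the low byte. Return the ring whose ASID matches, or 0xff when none
 * does.
 */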
static unsigned get_ring(const CPUState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

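    /*
     * Probe each way at the entry index implied by the address; a valid
     * match in more than one way is reported as a multi-hit exception.
     */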
    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
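    /*
     * attr values 0..11 grant read access, with bit 0 adding execute and
     * bit 1 adding write; attr 13 grants read/write only; every other
     * value grants no access at all.
     */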
    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
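    /*
     * The resulting mapping: attr 0 -> RW, attr 1, 2, 4 and 5 -> RWX,
     * attr 3 -> execute only, attr 14 -> RW, all remaining values -> no
     * access.
     */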
    if ((attr < 6 && attr != 3) || attr == 14) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    if (attr > 0 && attr < 6) {
        access |= PAGE_EXEC;
    }
    return access;
}

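/*
 * is_write follows the QEMU access-type convention: 0 is a load, 1 is a
 * store and 2 is an instruction fetch.
 */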
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return false;
    }
}

static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring);

static int get_physical_addr_mmu(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

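    /*
     * On a TLB miss try a hardware pagewalk, but skip the autorefill for
     * ring-0 accesses (mmu_idx == 0) inside the 4 MB page table region at
     * PTEVADDR: autorefill_mmu() translates with mmu_idx 0 itself, so
     * walking that region again would recurse.
     */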
    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
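    /*
     * Each 4 KB page is described by a 4-byte PTE, so the PTE for vaddr
     * lives at PTEVADDR + (vaddr >> 12) * 4, which is what
     * PTEVADDR | (vaddr >> 10) computes once the low two bits are masked
     * off.
     */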
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
            &paddr, &page_size, &access);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        uint32_t vpn;
        uint32_t pte = ldl_phys(paddr);

        *ring = (pte >> 4) & 0x3;
        *wi = (++env->autorefill_idx) & 0x3;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                __func__, vaddr, vpn, pte);
    }
    return ret;
}

static int get_physical_addr_region(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a pagewalk and replace an entry in the xtensa
 * autorefill TLB way.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}

static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                unsigned access = attr_to_access(entry->attr);

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%u %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr      Paddr      ASID Attr RWX\n"
                            "\t---------- ---------- ---- ---- ---\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-');
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}