]> git.proxmox.com Git - qemu.git/blob - target-xtensa/helper.c
0a26f8dd3a56fc4dc8ed318029ef23e506189d08
[qemu.git] / target-xtensa / helper.c
1 /*
2 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the Open Source and Linux Lab nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "gdbstub.h"
31 #include "qemu-common.h"
32 #include "host-utils.h"
33 #if !defined(CONFIG_USER_ONLY)
34 #include "hw/loader.h"
35 #endif
36
37 static void reset_mmu(CPUState *env);
38
39 void cpu_reset(CPUXtensaState *env)
40 {
41 env->exception_taken = 0;
42 env->pc = env->config->exception_vector[EXC_RESET];
43 env->sregs[LITBASE] &= ~1;
44 env->sregs[PS] = xtensa_option_enabled(env->config,
45 XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
46 env->sregs[VECBASE] = env->config->vecbase;
47 env->sregs[IBREAKENABLE] = 0;
48
49 env->pending_irq_level = 0;
50 reset_mmu(env);
51 }
52
53 static struct XtensaConfigList *xtensa_cores;
54
55 void xtensa_register_core(XtensaConfigList *node)
56 {
57 node->next = xtensa_cores;
58 xtensa_cores = node;
59 }
60
61 CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
62 {
63 static int tcg_inited;
64 CPUXtensaState *env;
65 const XtensaConfig *config = NULL;
66 XtensaConfigList *core = xtensa_cores;
67
68 for (; core; core = core->next)
69 if (strcmp(core->config->name, cpu_model) == 0) {
70 config = core->config;
71 break;
72 }
73
74 if (config == NULL) {
75 return NULL;
76 }
77
78 env = g_malloc0(sizeof(*env));
79 env->config = config;
80 cpu_exec_init(env);
81
82 if (!tcg_inited) {
83 tcg_inited = 1;
84 xtensa_translate_init();
85 }
86
87 xtensa_irq_init(env);
88 qemu_init_vcpu(env);
89 return env;
90 }
91
92
93 void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
94 {
95 XtensaConfigList *core = xtensa_cores;
96 cpu_fprintf(f, "Available CPUs:\n");
97 for (; core; core = core->next) {
98 cpu_fprintf(f, " %s\n", core->config->name);
99 }
100 }
101
102 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
103 {
104 uint32_t paddr;
105 uint32_t page_size;
106 unsigned access;
107
108 if (xtensa_get_physical_addr(env, addr, 0, 0,
109 &paddr, &page_size, &access) == 0) {
110 return paddr;
111 }
112 if (xtensa_get_physical_addr(env, addr, 2, 0,
113 &paddr, &page_size, &access) == 0) {
114 return paddr;
115 }
116 return ~0;
117 }
118
119 static uint32_t relocated_vector(CPUState *env, uint32_t vector)
120 {
121 if (xtensa_option_enabled(env->config,
122 XTENSA_OPTION_RELOCATABLE_VECTOR)) {
123 return vector - env->config->vecbase + env->sregs[VECBASE];
124 } else {
125 return vector;
126 }
127 }
128
129 /*!
 * Handle pending IRQ.
131 * For the high priority interrupt jump to the corresponding interrupt vector.
132 * For the level-1 interrupt convert it to either user, kernel or double
133 * exception with the 'level-1 interrupt' exception cause.
134 */
static void handle_interrupt(CPUState *env)
{
    int level = env->pending_irq_level;

    /* Deliverable only if the pending level preempts the current interrupt
     * level and some interrupt of that level is both raised (INTSET) and
     * enabled (INTENABLE). */
    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            /* High-priority interrupt: save PC/PS in the per-level
             * EPCx/EPSx registers, raise PS.INTLEVEL to the interrupt
             * level, set PS.EXCM, and jump to that level's vector. */
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            /* Level-1 interrupt: convert to a regular exception with the
             * level-1-interrupt cause code. */
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                /* Already in an exception (PS.EXCM set): this becomes a
                 * double exception.  PC is saved to DEPC when the core
                 * has one, otherwise to EPC1. */
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                /* Route to user or kernel vector based on PS.UM. */
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}
171
void do_interrupt(CPUState *env)
{
    if (env->exception_index == EXC_IRQ) {
        /* Trace, then try to deliver the pending interrupt.
         * handle_interrupt() may rewrite exception_index (to EXC_DOUBLE,
         * EXC_USER or EXC_KERNEL) or jump directly to a high-priority
         * interrupt vector. */
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        /* Jump to the configured vector (relocated via VECBASE when that
         * option is present); a zero vector address means this exception
         * is not configured for the core. */
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        /* Either converted by handle_interrupt() above, or the interrupt
         * is not currently deliverable: nothing more to do here. */
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}
222
223 static void reset_tlb_mmu_all_ways(CPUState *env,
224 const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
225 {
226 unsigned wi, ei;
227
228 for (wi = 0; wi < tlb->nways; ++wi) {
229 for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
230 entry[wi][ei].asid = 0;
231 entry[wi][ei].variable = true;
232 }
233 }
234 }
235
/*
 * Reset ways 5 and 6 of an MMU TLB to their reset-time mappings.
 *
 * Without the variable-way-5/6 option the contents are fixed: way 5 maps
 * the two 0xd0000000-based segments to physical 0, way 6 maps 0xe0000000
 * and 0xf0000000 to physical 0xf0000000.  With variable ways, way 6 is
 * filled with eight 512MB identity mappings (way 5 is left as invalidated
 * by reset_tlb_mmu_all_ways()).
 */
static void reset_tlb_mmu_ways56(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Fixed entries: asid 1 makes them valid; attr values 7 and 3
         * select the access attributes (see mmu_attr_to_access()). */
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        /* Variable way 6: identity-map the whole 4GB space in eight
         * 512MB chunks (vaddr == paddr == ei << 29). */
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
282
283 static void reset_tlb_region_way0(CPUState *env,
284 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
285 {
286 unsigned ei;
287
288 for (ei = 0; ei < 8; ++ei) {
289 entry[0][ei].vaddr = ei << 29;
290 entry[0][ei].paddr = ei << 29;
291 entry[0][ei].asid = 1;
292 entry[0][ei].attr = 2;
293 entry[0][ei].variable = true;
294 }
295 }
296
297 static void reset_mmu(CPUState *env)
298 {
299 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
300 env->sregs[RASID] = 0x04030201;
301 env->sregs[ITLBCFG] = 0;
302 env->sregs[DTLBCFG] = 0;
303 env->autorefill_idx = 0;
304 reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
305 reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
306 reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
307 reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
308 } else {
309 reset_tlb_region_way0(env, env->itlb);
310 reset_tlb_region_way0(env, env->dtlb);
311 }
312 }
313
314 static unsigned get_ring(const CPUState *env, uint8_t asid)
315 {
316 unsigned i;
317 for (i = 0; i < 4; ++i) {
318 if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
319 return i;
320 }
321 }
322 return 0xff;
323 }
324
325 /*!
326 * Lookup xtensa TLB for the given virtual address.
327 * See ISA, 4.6.2.2
328 *
329 * \param pwi: [out] way index
330 * \param pei: [out] entry index
331 * \param pring: [out] access ring
332 * \return 0 if ok, exception cause code otherwise
333 */
int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    /* Probe every way; within a way, the only slot that can hold @addr
     * is computed by split_tlb_entry_spec_way(). */
    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        /* A hit requires a matching VPN and a valid (non-zero) ASID. */
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            /* Only entries whose ASID maps to a current ring count;
             * get_ring() returns 0xff otherwise. */
            if (ring < 4) {
                /* More than one matching entry is a multi-hit fault. */
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    /* Zero hits is a TLB miss; exactly one hit is success. */
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
366
367 /*!
368 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
369 * See ISA, 4.6.5.10
370 */
371 static unsigned mmu_attr_to_access(uint32_t attr)
372 {
373 unsigned access = 0;
374 if (attr < 12) {
375 access |= PAGE_READ;
376 if (attr & 0x1) {
377 access |= PAGE_EXEC;
378 }
379 if (attr & 0x2) {
380 access |= PAGE_WRITE;
381 }
382 } else if (attr == 13) {
383 access |= PAGE_READ | PAGE_WRITE;
384 }
385 return access;
386 }
387
388 /*!
389 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
390 * See ISA, 4.6.3.3
391 */
392 static unsigned region_attr_to_access(uint32_t attr)
393 {
394 unsigned access = 0;
395 if ((attr < 6 && attr != 3) || attr == 14) {
396 access |= PAGE_READ | PAGE_WRITE;
397 }
398 if (attr > 0 && attr < 6) {
399 access |= PAGE_EXEC;
400 }
401 return access;
402 }
403
404 static bool is_access_granted(unsigned access, int is_write)
405 {
406 switch (is_write) {
407 case 0:
408 return access & PAGE_READ;
409
410 case 1:
411 return access & PAGE_WRITE;
412
413 case 2:
414 return access & PAGE_EXEC;
415
416 default:
417 return 0;
418 }
419 }
420
421 static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
422 uint32_t *wi, uint32_t *ei, uint8_t *ring);
423
/*
 * Full MMU translation of @vaddr.  On a plain TLB miss an autorefill page
 * table walk is attempted; ring privilege and access rights are then
 * checked, and *paddr/*page_size/*access are filled in on success.
 * Returns 0 or an exception cause code.
 */
static int get_physical_addr_mmu(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    /* is_write == 2 means instruction fetch, which uses the ITLB. */
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    /* On a miss, try autorefill -- except when mmu_idx == 0 and @vaddr
     * lies inside the 4MB window at PTEVADDR (top 10 address bits equal),
     * which would be a walk of the page-table region itself. */
    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    /* An entry ring numerically below mmu_idx is a privilege violation. */
    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    /* Combine the entry's page frame with the in-page offset bits;
     * the way's address mask determines the page size. */
    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
466
/*
 * TLB autorefill: compute the page-table entry address from PTEVADDR and
 * the VPN, translate it (as a ring-0 data read), load the PTE from
 * physical memory, and install it into one of the four autorefill ways
 * (0..3, round-robin).  Returns 0 on success, otherwise the cause code
 * from translating the PTE address.
 */
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    /* PTE virtual address: PTEVADDR window indexed by vaddr's VPN,
     * forced to word alignment. */
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
            &paddr, &page_size, &access);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        uint32_t vpn;
        uint32_t pte = ldl_phys(paddr);

        /* PTE bits [5:4] hold the mapping's ring. */
        *ring = (pte >> 4) & 0x3;
        /* Round-robin victim selection among ways 0..3. */
        *wi = (++env->autorefill_idx) & 0x3;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                __func__, vaddr, vpn, pte);
    }
    return ret;
}
494
495 static int get_physical_addr_region(CPUState *env,
496 uint32_t vaddr, int is_write, int mmu_idx,
497 uint32_t *paddr, uint32_t *page_size, unsigned *access)
498 {
499 bool dtlb = is_write != 2;
500 uint32_t wi = 0;
501 uint32_t ei = (vaddr >> 29) & 0x7;
502 const xtensa_tlb_entry *entry =
503 xtensa_tlb_get_entry(env, dtlb, wi, ei);
504
505 *access = region_attr_to_access(entry->attr);
506 if (!is_access_granted(*access, is_write)) {
507 return dtlb ?
508 (is_write ?
509 STORE_PROHIBITED_CAUSE :
510 LOAD_PROHIBITED_CAUSE) :
511 INST_FETCH_PROHIBITED_CAUSE;
512 }
513
514 *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
515 *page_size = ~REGION_PAGE_MASK + 1;
516
517 return 0;
518 }
519
520 /*!
521 * Convert virtual address to physical addr.
522 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
523 *
524 * \return 0 if ok, exception cause code otherwise
525 */
526 int xtensa_get_physical_addr(CPUState *env,
527 uint32_t vaddr, int is_write, int mmu_idx,
528 uint32_t *paddr, uint32_t *page_size, unsigned *access)
529 {
530 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
531 return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
532 paddr, page_size, access);
533 } else if (xtensa_option_bits_enabled(env->config,
534 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
535 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
536 return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
537 paddr, page_size, access);
538 } else {
539 *paddr = vaddr;
540 *page_size = TARGET_PAGE_SIZE;
541 *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
542 return 0;
543 }
544 }
545
546 static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
547 CPUState *env, bool dtlb)
548 {
549 unsigned wi, ei;
550 const xtensa_tlb *conf =
551 dtlb ? &env->config->dtlb : &env->config->itlb;
552 unsigned (*attr_to_access)(uint32_t) =
553 xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
554 mmu_attr_to_access : region_attr_to_access;
555
556 for (wi = 0; wi < conf->nways; ++wi) {
557 uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
558 const char *sz_text;
559 bool print_header = true;
560
561 if (sz >= 0x100000) {
562 sz >>= 20;
563 sz_text = "MB";
564 } else {
565 sz >>= 10;
566 sz_text = "KB";
567 }
568
569 for (ei = 0; ei < conf->way_size[wi]; ++ei) {
570 const xtensa_tlb_entry *entry =
571 xtensa_tlb_get_entry(env, dtlb, wi, ei);
572
573 if (entry->asid) {
574 unsigned access = attr_to_access(entry->attr);
575
576 if (print_header) {
577 print_header = false;
578 cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
579 cpu_fprintf(f,
580 "\tVaddr Paddr ASID Attr RWX\n"
581 "\t---------- ---------- ---- ---- ---\n");
582 }
583 cpu_fprintf(f,
584 "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c\n",
585 entry->vaddr,
586 entry->paddr,
587 entry->asid,
588 entry->attr,
589 (access & PAGE_READ) ? 'R' : '-',
590 (access & PAGE_WRITE) ? 'W' : '-',
591 (access & PAGE_EXEC) ? 'X' : '-');
592 }
593 }
594 }
595 }
596
597 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
598 {
599 if (xtensa_option_bits_enabled(env->config,
600 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
601 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
602 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
603
604 cpu_fprintf(f, "ITLB:\n");
605 dump_tlb(f, cpu_fprintf, env, false);
606 cpu_fprintf(f, "\nDTLB:\n");
607 dump_tlb(f, cpu_fprintf, env, true);
608 } else {
609 cpu_fprintf(f, "No TLB for this CPU core\n");
610 }
611 }