1 /*
2 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the Open Source and Linux Lab nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include "qemu/osdep.h"
29 #include "cpu.h"
30 #include "exec/helper-proto.h"
31 #include "qemu/host-utils.h"
32 #include "exec/exec-all.h"
33 #include "exec/cpu_ldst.h"
34 #include "exec/address-spaces.h"
35 #include "qemu/timer.h"
36
37 void xtensa_cpu_do_unaligned_access(CPUState *cs,
38 vaddr addr, MMUAccessType access_type,
39 int mmu_idx, uintptr_t retaddr)
40 {
41 XtensaCPU *cpu = XTENSA_CPU(cs);
42 CPUXtensaState *env = &cpu->env;
43
44 if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
45 !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
46 cpu_restore_state(CPU(cpu), retaddr);
47 HELPER(exception_cause_vaddr)(env,
48 env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
49 }
50 }
51
52 void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type,
53 int mmu_idx, uintptr_t retaddr)
54 {
55 XtensaCPU *cpu = XTENSA_CPU(cs);
56 CPUXtensaState *env = &cpu->env;
57 uint32_t paddr;
58 uint32_t page_size;
59 unsigned access;
60 int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
61 &paddr, &page_size, &access);
62
63 qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
64 __func__, vaddr, access_type, mmu_idx, paddr, ret);
65
66 if (ret == 0) {
67 tlb_set_page(cs,
68 vaddr & TARGET_PAGE_MASK,
69 paddr & TARGET_PAGE_MASK,
70 access, mmu_idx, page_size);
71 } else {
72 cpu_restore_state(cs, retaddr);
73 HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
74 }
75 }
76
77 void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
78 bool is_write, bool is_exec, int opaque,
79 unsigned size)
80 {
81 XtensaCPU *cpu = XTENSA_CPU(cs);
82 CPUXtensaState *env = &cpu->env;
83
84 HELPER(exception_cause_vaddr)(env, env->pc,
85 is_exec ?
86 INSTR_PIF_ADDR_ERROR_CAUSE :
87 LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
88 is_exec ? addr : cs->mem_io_vaddr);
89 }
90
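/*
 * Translate vaddr with the current MMU state and, if the translation
 * succeeds, invalidate any TB derived from the resulting physical address.
 */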
91 static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
92 {
93 uint32_t paddr;
94 uint32_t page_size;
95 unsigned access;
96 int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
97 &paddr, &page_size, &access);
98 if (ret == 0) {
99 tb_invalidate_phys_addr(&address_space_memory, paddr);
100 }
101 }
102
103 void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
104 {
105 CPUState *cs = CPU(xtensa_env_get_cpu(env));
106
107 cs->exception_index = excp;
108 if (excp == EXCP_YIELD) {
109 env->yield_needed = 0;
110 }
111 if (excp == EXCP_DEBUG) {
112 env->exception_taken = 0;
113 }
114 cpu_loop_exit(cs);
115 }
116
117 void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
118 {
119 uint32_t vector;
120
121 env->pc = pc;
122 if (env->sregs[PS] & PS_EXCM) {
123 if (env->config->ndepc) {
124 env->sregs[DEPC] = pc;
125 } else {
126 env->sregs[EPC1] = pc;
127 }
128 vector = EXC_DOUBLE;
129 } else {
130 env->sregs[EPC1] = pc;
131 vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
132 }
133
134 env->sregs[EXCCAUSE] = cause;
135 env->sregs[PS] |= PS_EXCM;
136
137 HELPER(exception)(env, vector);
138 }
139
140 void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
141 uint32_t pc, uint32_t cause, uint32_t vaddr)
142 {
143 env->sregs[EXCVADDR] = vaddr;
144 HELPER(exception_cause)(env, pc, cause);
145 }
146
147 void debug_exception_env(CPUXtensaState *env, uint32_t cause)
148 {
149 if (xtensa_get_cintlevel(env) < env->config->debug_level) {
150 HELPER(debug_exception)(env, env->pc, cause);
151 }
152 }
153
154 void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
155 {
156 unsigned level = env->config->debug_level;
157
158 env->pc = pc;
159 env->sregs[DEBUGCAUSE] = cause;
160 env->sregs[EPC1 + level - 1] = pc;
161 env->sregs[EPS2 + level - 2] = env->sregs[PS];
162 env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
163 (level << PS_INTLEVEL_SHIFT);
164 HELPER(exception)(env, EXC_DEBUG);
165 }
166
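/*
 * NSA: number of left shifts needed to normalize a signed value (make
 * bits 31 and 30 differ); 31 for 0 and -1.  NSAU below is the unsigned
 * variant: count of leading zeros, 32 for 0.
 */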
167 uint32_t HELPER(nsa)(uint32_t v)
168 {
169 if (v & 0x80000000) {
170 v = ~v;
171 }
172 return v ? clz32(v) - 1 : 31;
173 }
174
175 uint32_t HELPER(nsau)(uint32_t v)
176 {
177 return v ? clz32(v) : 32;
178 }
179
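/*
 * The live window a0..a15 is a view into the circular physical register
 * file, so copies between the two may wrap around its end.
 */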
180 static void copy_window_from_phys(CPUXtensaState *env,
181 uint32_t window, uint32_t phys, uint32_t n)
182 {
183 assert(phys < env->config->nareg);
184 if (phys + n <= env->config->nareg) {
185 memcpy(env->regs + window, env->phys_regs + phys,
186 n * sizeof(uint32_t));
187 } else {
188 uint32_t n1 = env->config->nareg - phys;
189 memcpy(env->regs + window, env->phys_regs + phys,
190 n1 * sizeof(uint32_t));
191 memcpy(env->regs + window + n1, env->phys_regs,
192 (n - n1) * sizeof(uint32_t));
193 }
194 }
195
196 static void copy_phys_from_window(CPUXtensaState *env,
197 uint32_t phys, uint32_t window, uint32_t n)
198 {
199 assert(phys < env->config->nareg);
200 if (phys + n <= env->config->nareg) {
201 memcpy(env->phys_regs + phys, env->regs + window,
202 n * sizeof(uint32_t));
203 } else {
204 uint32_t n1 = env->config->nareg - phys;
205 memcpy(env->phys_regs + phys, env->regs + window,
206 n1 * sizeof(uint32_t));
207 memcpy(env->phys_regs, env->regs + window + n1,
208 (n - n1) * sizeof(uint32_t));
209 }
210 }
211
212
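/*
 * WINDOW_BASE counts 4-register units; masking with nareg / 4 - 1 wraps
 * it around the physical register file.
 */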
213 static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
214 {
215 return a & (env->config->nareg / 4 - 1);
216 }
217
218 static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
219 {
220 return 1 << windowbase_bound(a, env);
221 }
222
223 void xtensa_sync_window_from_phys(CPUXtensaState *env)
224 {
225 copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
226 }
227
228 void xtensa_sync_phys_from_window(CPUXtensaState *env)
229 {
230 copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
231 }
232
233 static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
234 {
235 xtensa_sync_phys_from_window(env);
236 env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
237 xtensa_sync_window_from_phys(env);
238 }
239
240 static void rotate_window(CPUXtensaState *env, uint32_t delta)
241 {
242 rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
243 }
244
245 void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
246 {
247 rotate_window_abs(env, v);
248 }
249
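/*
 * ENTRY: raise an illegal instruction exception when windowing is not
 * usable (PS.WOE clear or PS.EXCM set) or s does not name a0..a3.
 * Otherwise, if any of the next CALLINC windows is still live, take a
 * window overflow first; then place the decremented stack pointer in the
 * register that becomes a<s> of the callee's frame, rotate the window by
 * CALLINC and mark the new WINDOW_BASE in WINDOW_START.
 */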
250 void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
251 {
252 int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
253 if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
254 qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
255 pc, env->sregs[PS]);
256 HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
257 } else {
258 uint32_t windowstart = xtensa_replicate_windowstart(env) >>
259 (env->sregs[WINDOW_BASE] + 1);
260
261 if (windowstart & ((1 << callinc) - 1)) {
262 HELPER(window_check)(env, pc, callinc);
263 }
264 env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
265 rotate_window(env, callinc);
266 env->sregs[WINDOW_START] |=
267 windowstart_bit(env->sregs[WINDOW_BASE], env);
268 }
269 }
270
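/*
 * Window overflow: rotate forward to the nearest live window recorded in
 * WINDOW_START, save the old WINDOW_BASE in PS.OWB, enter exception mode
 * and dispatch WindowOverflow4/8/12 according to the size of the register
 * frame that must be spilled.
 */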
271 void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
272 {
273 uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
274 uint32_t windowstart = xtensa_replicate_windowstart(env) >>
275 (env->sregs[WINDOW_BASE] + 1);
276 uint32_t n = ctz32(windowstart) + 1;
277
278 assert(n <= w);
279
280 rotate_window(env, n);
281 env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
282 (windowbase << PS_OWB_SHIFT) | PS_EXCM;
283 env->sregs[EPC1] = env->pc = pc;
284
285 switch (ctz32(windowstart >> n)) {
286 case 0:
287 HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
288 break;
289 case 1:
290 HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
291 break;
292 default:
293 HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
294 break;
295 }
296 }
297
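/*
 * RETW: n (a0[31:30]) is the caller's window increment.  Illegal when
 * windowing is off, n is 0, or n disagrees with the live caller window
 * found in WINDOW_START.  On return the window rotates back by n; if the
 * caller's frame is no longer live, a WindowUnderflow4/8/12 exception is
 * raised to reload it.  The return address combines a0[29:0] with the
 * current PC region.
 */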
298 uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
299 {
300 int n = (env->regs[0] >> 30) & 0x3;
301 int m = 0;
302 uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
303 uint32_t windowstart = env->sregs[WINDOW_START];
304 uint32_t ret_pc = 0;
305
306 if (windowstart & windowstart_bit(windowbase - 1, env)) {
307 m = 1;
308 } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
309 m = 2;
310 } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
311 m = 3;
312 }
313
314 if (n == 0 || (m != 0 && m != n) ||
315 ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
316 qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
317 "PS = %08x, m = %d, n = %d\n",
318 pc, env->sregs[PS], m, n);
319 HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
320 } else {
321 int owb = windowbase;
322
323 ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);
324
325 rotate_window(env, -n);
326 if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
327 env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
328 } else {
329 /* window underflow */
330 env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
331 (windowbase << PS_OWB_SHIFT) | PS_EXCM;
332 env->sregs[EPC1] = env->pc = pc;
333
334 if (n == 1) {
335 HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
336 } else if (n == 2) {
337 HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
338 } else if (n == 3) {
339 HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
340 }
341 }
342 }
343 return ret_pc;
344 }
345
346 void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
347 {
348 rotate_window(env, imm4);
349 }
350
351 void HELPER(restore_owb)(CPUXtensaState *env)
352 {
353 rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
354 }
355
356 void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
357 {
358 if ((env->sregs[WINDOW_START] &
359 (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
360 windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
361 windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
362 HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
363 }
364 }
365
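/*
 * Zero-overhead loop handling is compiled into the TB that ends at LEND,
 * so that TB has to be invalidated whenever LBEG or LEND changes.
 */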
366 void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
367 {
368 if (env->sregs[LBEG] != v) {
369 tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
370 env->sregs[LBEG] = v;
371 }
372 }
373
374 void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
375 {
376 if (env->sregs[LEND] != v) {
377 tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
378 env->sregs[LEND] = v;
379 tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
380 }
381 }
382
383 void HELPER(dump_state)(CPUXtensaState *env)
384 {
385 XtensaCPU *cpu = xtensa_env_get_cpu(env);
386
387 cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
388 }
389
390 void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
391 {
392 CPUState *cpu;
393
394 env->pc = pc;
395 env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
396 (intlevel << PS_INTLEVEL_SHIFT);
397 check_interrupts(env);
398 if (env->pending_irq_level) {
399 cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
400 return;
401 }
402
403 cpu = CPU(xtensa_env_get_cpu(env));
404 cpu->halted = 1;
405 HELPER(exception)(env, EXCP_HLT);
406 }
407
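/*
 * CCOUNT follows the virtual clock: nanoseconds elapsed since time_base,
 * scaled by the core clock frequency, added to ccount_base.
 */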
408 void HELPER(update_ccount)(CPUXtensaState *env)
409 {
410 uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
411
412 env->ccount_time = now;
413 env->sregs[CCOUNT] = env->ccount_base +
414 (uint32_t)((now - env->time_base) *
415 env->config->clock_freq_khz / 1000000);
416 }
417
418 void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
419 {
420 int i;
421
422 HELPER(update_ccount)(env);
423 env->ccount_base += v - env->sregs[CCOUNT];
424 for (i = 0; i < env->config->nccompare; ++i) {
425 HELPER(update_ccompare)(env, i);
426 }
427 }
428
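/*
 * Re-arm the CCOMPARE[i] timer: dcc is the number of CCOUNT ticks until
 * the next match (modulo 2^32, so an exact match waits a full wrap),
 * converted to nanoseconds on the virtual clock.
 */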
429 void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
430 {
431 uint64_t dcc;
432
433 HELPER(update_ccount)(env);
434 dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
435 timer_mod(env->ccompare[i].timer,
436 env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
437 env->yield_needed = 1;
438 }
439
440 void HELPER(check_interrupts)(CPUXtensaState *env)
441 {
442 check_interrupts(env);
443 }
444
445 void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
446 {
447 get_page_addr_code(env, vaddr);
448 }
449
450 /*!
451 * Check vaddr accessibility/cache attributes and raise an exception if
452 * specified by the ATOMCTL SR.
453 *
454 * Note: local memory exclusion is not implemented
455 */
456 void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
457 {
458 uint32_t paddr, page_size, access;
459 uint32_t atomctl = env->sregs[ATOMCTL];
460 int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
461 xtensa_get_cring(env), &paddr, &page_size, &access);
462
463 /*
464 * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
465 * see opcode description in the ISA
466 */
467 if (rc == 0 &&
468 (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
469 rc = STORE_PROHIBITED_CAUSE;
470 }
471
472 if (rc) {
473 HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
474 }
475
476 /*
477 * When data cache is not configured use ATOMCTL bypass field.
478 * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
479 * under the Conditional Store Option.
480 */
481 if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
482 access = PAGE_CACHE_BYPASS;
483 }
484
485 switch (access & PAGE_CACHE_MASK) {
486 case PAGE_CACHE_WB:
487 atomctl >>= 2;
488 /* fall through */
489 case PAGE_CACHE_WT:
490 atomctl >>= 2;
491 /* fall through */
492 case PAGE_CACHE_BYPASS:
493 if ((atomctl & 0x3) == 0) {
494 HELPER(exception_cause_vaddr)(env, pc,
495 LOAD_STORE_ERROR_CAUSE, vaddr);
496 }
497 break;
498
499 case PAGE_CACHE_ISOLATE:
500 HELPER(exception_cause_vaddr)(env, pc,
501 LOAD_STORE_ERROR_CAUSE, vaddr);
502 break;
503
504 default:
505 break;
506 }
507 }
508
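/*
 * MEMCTL: clamp the i/d-cache "use ways" and d-cache "allocate ways"
 * fields to the configured number of ways and keep only the bits present
 * in memctl_mask.
 */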
509 void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
510 {
511 if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
512 if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
513 env->config->icache_ways) {
514 v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
515 env->config->icache_ways);
516 }
517 }
518 if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
519 if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
520 env->config->dcache_ways) {
521 v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
522 env->config->dcache_ways);
523 }
524 if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
525 env->config->dcache_ways) {
526 v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
527 env->config->dcache_ways);
528 }
529 }
530 env->sregs[MEMCTL] = v & env->config->memctl_mask;
531 }
532
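/*
 * RASID: the ring 0 ASID field is forced to 1; any change to the ring
 * ASIDs invalidates cached translations, hence the TLB flush.
 */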
533 void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
534 {
535 XtensaCPU *cpu = xtensa_env_get_cpu(env);
536
537 v = (v & 0xffffff00) | 0x1;
538 if (v != env->sregs[RASID]) {
539 env->sregs[RASID] = v;
540 tlb_flush(CPU(cpu), 1);
541 }
542 }
543
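/*
 * Page size selector for the variable-size TLB ways: way 4 takes a
 * two-bit field and ways 5/6 a one-bit field from DTLBCFG/ITLBCFG; all
 * other ways use a fixed page size (0 is returned).
 */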
544 static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
545 {
546 uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
547
548 switch (way) {
549 case 4:
550 return (tlbcfg >> 16) & 0x3;
551
552 case 5:
553 return (tlbcfg >> 20) & 0x1;
554
555 case 6:
556 return (tlbcfg >> 24) & 0x1;
557
558 default:
559 return 0;
560 }
561 }
562
563 /*!
564 * Get bit mask for the virtual address bits translated by the TLB way
565 */
566 uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
567 {
568 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
569 bool varway56 = dtlb ?
570 env->config->dtlb.varway56 :
571 env->config->itlb.varway56;
572
573 switch (way) {
574 case 4:
575 return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
576
577 case 5:
578 if (varway56) {
579 return 0xf8000000 << get_page_size(env, dtlb, way);
580 } else {
581 return 0xf8000000;
582 }
583
584 case 6:
585 if (varway56) {
586 return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
587 } else {
588 return 0xf0000000;
589 }
590
591 default:
592 return 0xfffff000;
593 }
594 } else {
595 return REGION_PAGE_MASK;
596 }
597 }
598
599 /*!
600 * Get bit mask for the 'VPN without index' field.
601 * See ISA, 4.6.5.6, data format for RxTLB0
602 */
603 static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
604 {
605 if (way < 4) {
606 bool is32 = (dtlb ?
607 env->config->dtlb.nrefillentries :
608 env->config->itlb.nrefillentries) == 32;
609 return is32 ? 0xffff8000 : 0xffffc000;
610 } else if (way == 4) {
611 return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
612 } else if (way <= 6) {
613 uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
614 bool varway56 = dtlb ?
615 env->config->dtlb.varway56 :
616 env->config->itlb.varway56;
617
618 if (varway56) {
619 return mask << (way == 5 ? 2 : 3);
620 } else {
621 return mask << 1;
622 }
623 } else {
624 return 0xfffff000;
625 }
626 }
627
628 /*!
629 * Split virtual address into VPN (with index) and entry index
630 * for the given TLB way
631 */
632 void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
633 uint32_t *vpn, uint32_t wi, uint32_t *ei)
634 {
635 bool varway56 = dtlb ?
636 env->config->dtlb.varway56 :
637 env->config->itlb.varway56;
638
639 if (!dtlb) {
640 wi &= 7;
641 }
642
643 if (wi < 4) {
644 bool is32 = (dtlb ?
645 env->config->dtlb.nrefillentries :
646 env->config->itlb.nrefillentries) == 32;
647 *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
648 } else {
649 switch (wi) {
650 case 4:
651 {
652 uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
653 *ei = (v >> eibase) & 0x3;
654 }
655 break;
656
657 case 5:
658 if (varway56) {
659 uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
660 *ei = (v >> eibase) & 0x3;
661 } else {
662 *ei = (v >> 27) & 0x1;
663 }
664 break;
665
666 case 6:
667 if (varway56) {
668 uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
669 *ei = (v >> eibase) & 0x7;
670 } else {
671 *ei = (v >> 28) & 0x1;
672 }
673 break;
674
675 default:
676 *ei = 0;
677 break;
678 }
679 }
680 *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
681 }
682
683 /*!
684 * Split TLB address into TLB way, entry index and VPN (with index).
685 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
686 */
687 static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
688 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
689 {
690 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
691 *wi = v & (dtlb ? 0xf : 0x7);
692 split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
693 } else {
694 *vpn = v & REGION_PAGE_MASK;
695 *wi = 0;
696 *ei = (v >> 29) & 0x7;
697 }
698 }
699
700 static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
701 uint32_t v, bool dtlb, uint32_t *pwi)
702 {
703 uint32_t vpn;
704 uint32_t wi;
705 uint32_t ei;
706
707 split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
708 if (pwi) {
709 *pwi = wi;
710 }
711 return xtensa_tlb_get_entry(env, dtlb, wi, ei);
712 }
713
714 uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
715 {
716 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
717 uint32_t wi;
718 const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
719 return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
720 } else {
721 return v & REGION_PAGE_MASK;
722 }
723 }
724
725 uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
726 {
727 const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
728 return entry->paddr | entry->attr;
729 }
730
731 void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
732 {
733 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
734 uint32_t wi;
735 xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
736 if (entry->variable && entry->asid) {
737 tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
738 entry->asid = 0;
739 }
740 }
741 }
742
743 uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
744 {
745 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
746 uint32_t wi;
747 uint32_t ei;
748 uint8_t ring;
749 int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
750
751 switch (res) {
752 case 0:
753 if (ring >= xtensa_get_ring(env)) {
754 return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
755 }
756 break;
757
758 case INST_TLB_MULTI_HIT_CAUSE:
759 case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
760 HELPER(exception_cause_vaddr)(env, env->pc, res, v);
761 break;
762 }
763 return 0;
764 } else {
765 return (v & REGION_PAGE_MASK) | 0x1;
766 }
767 }
768
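/*
 * Fill a TLB entry from a PTE: the PTE ring field selects which byte of
 * RASID becomes the entry's ASID, and the low four PTE bits are the
 * cache/access attributes.
 */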
769 void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
770 xtensa_tlb_entry *entry, bool dtlb,
771 unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
772 {
773 entry->vaddr = vpn;
774 entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
775 entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
776 entry->attr = pte & 0xf;
777 }
778
779 void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
780 unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
781 {
782 XtensaCPU *cpu = xtensa_env_get_cpu(env);
783 CPUState *cs = CPU(cpu);
784 xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
785
786 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
787 if (entry->variable) {
788 if (entry->asid) {
789 tlb_flush_page(cs, entry->vaddr);
790 }
791 xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
792 tlb_flush_page(cs, entry->vaddr);
793 } else {
794 qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
795 __func__, dtlb, wi, ei);
796 }
797 } else {
798 tlb_flush_page(cs, entry->vaddr);
799 if (xtensa_option_enabled(env->config,
800 XTENSA_OPTION_REGION_TRANSLATION)) {
801 entry->paddr = pte & REGION_PAGE_MASK;
802 }
803 entry->attr = pte & 0xf;
804 }
805 }
806
807 void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
808 {
809 uint32_t vpn;
810 uint32_t wi;
811 uint32_t ei;
812 split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
813 xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
814 }
815
816
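/*
 * Instruction breakpoints are checked in generated code, so the TB at an
 * IBREAKA address must be invalidated whenever the breakpoint is enabled,
 * disabled or moved.
 */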
817 void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
818 {
819 uint32_t change = v ^ env->sregs[IBREAKENABLE];
820 unsigned i;
821
822 for (i = 0; i < env->config->nibreak; ++i) {
823 if (change & (1 << i)) {
824 tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
825 }
826 }
827 env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
828 }
829
830 void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
831 {
832 if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
833 tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
834 tb_invalidate_virtual_addr(env, v);
835 }
836 env->sregs[IBREAKA + i] = v;
837 }
838
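/*
 * Data breakpoints are mapped onto QEMU watchpoints: DBREAKC.SB/LB select
 * write/read access, and the DBREAKC mask (which must be contiguous)
 * gives the watched address range.
 */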
839 static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
840 uint32_t dbreakc)
841 {
842 CPUState *cs = CPU(xtensa_env_get_cpu(env));
843 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
844 uint32_t mask = dbreakc | ~DBREAKC_MASK;
845
846 if (env->cpu_watchpoint[i]) {
847 cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
848 }
849 if (dbreakc & DBREAKC_SB) {
850 flags |= BP_MEM_WRITE;
851 }
852 if (dbreakc & DBREAKC_LB) {
853 flags |= BP_MEM_READ;
854 }
855 /* contiguous mask after inversion is one less than some power of 2 */
856 if ((~mask + 1) & ~mask) {
857 qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
858 /* cut mask after the first zero bit */
859 mask = 0xffffffff << (32 - clo32(mask));
860 }
861 if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
862 flags, &env->cpu_watchpoint[i])) {
863 env->cpu_watchpoint[i] = NULL;
864 qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
865 dbreaka & mask, ~mask + 1);
866 }
867 }
868
869 void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
870 {
871 uint32_t dbreakc = env->sregs[DBREAKC + i];
872
873 if ((dbreakc & DBREAKC_SB_LB) &&
874 env->sregs[DBREAKA + i] != v) {
875 set_dbreak(env, i, v, dbreakc);
876 }
877 env->sregs[DBREAKA + i] = v;
878 }
879
880 void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
881 {
882 if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
883 if (v & DBREAKC_SB_LB) {
884 set_dbreak(env, i, env->sregs[DBREAKA + i], v);
885 } else {
886 if (env->cpu_watchpoint[i]) {
887 CPUState *cs = CPU(xtensa_env_get_cpu(env));
888
889 cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
890 env->cpu_watchpoint[i] = NULL;
891 }
892 }
893 }
894 env->sregs[DBREAKC + i] = v;
895 }
896
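/* FCR bits 1:0 select the rounding mode; only implemented bits are kept. */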
897 void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
898 {
899 static const int rounding_mode[] = {
900 float_round_nearest_even,
901 float_round_to_zero,
902 float_round_up,
903 float_round_down,
904 };
905
906 env->uregs[FCR] = v & 0xfffff07f;
907 set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
908 }
909
910 float32 HELPER(abs_s)(float32 v)
911 {
912 return float32_abs(v);
913 }
914
915 float32 HELPER(neg_s)(float32 v)
916 {
917 return float32_chs(v);
918 }
919
920 float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
921 {
922 return float32_add(a, b, &env->fp_status);
923 }
924
925 float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
926 {
927 return float32_sub(a, b, &env->fp_status);
928 }
929
930 float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
931 {
932 return float32_mul(a, b, &env->fp_status);
933 }
934
935 float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
936 {
937 return float32_muladd(b, c, a, 0,
938 &env->fp_status);
939 }
940
941 float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
942 {
943 return float32_muladd(b, c, a, float_muladd_negate_product,
944 &env->fp_status);
945 }
946
947 uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
948 {
949 float_status fp_status = {0};
950
951 set_float_rounding_mode(rounding_mode, &fp_status);
952 return float32_to_int32(
953 float32_scalbn(v, scale, &fp_status), &fp_status);
954 }
955
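/*
 * Unsigned conversion: negative non-NaN inputs are converted along the
 * signed path rather than being saturated to zero.
 */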
956 uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
957 {
958 float_status fp_status = {0};
959 float32 res;
960
961 set_float_rounding_mode(rounding_mode, &fp_status);
962
963 res = float32_scalbn(v, scale, &fp_status);
964
965 if (float32_is_neg(v) && !float32_is_any_nan(v)) {
966 return float32_to_int32(res, &fp_status);
967 } else {
968 return float32_to_uint32(res, &fp_status);
969 }
970 }
971
972 float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
973 {
974 return float32_scalbn(int32_to_float32(v, &env->fp_status),
975 (int32_t)scale, &env->fp_status);
976 }
977
978 float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
979 {
980 return float32_scalbn(uint32_to_float32(v, &env->fp_status),
981 (int32_t)scale, &env->fp_status);
982 }
983
984 static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
985 {
986 if (v) {
987 env->sregs[BR] |= br;
988 } else {
989 env->sregs[BR] &= ~br;
990 }
991 }
992
993 void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
994 {
995 set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
996 }
997
998 void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
999 {
1000 set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
1001 }
1002
1003 void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
1004 {
1005 int v = float32_compare_quiet(a, b, &env->fp_status);
1006 set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
1007 }
1008
1009 void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
1010 {
1011 set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
1012 }
1013
1014 void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
1015 {
1016 int v = float32_compare_quiet(a, b, &env->fp_status);
1017 set_br(env, v == float_relation_less || v == float_relation_unordered, br);
1018 }
1019
1020 void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
1021 {
1022 set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
1023 }
1024
1025 void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
1026 {
1027 int v = float32_compare_quiet(a, b, &env->fp_status);
1028 set_br(env, v != float_relation_greater, br);
1029 }