]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. | |
3 | * All rights reserved. | |
4 | * | |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions are met: | |
7 | * * Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * * Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * * Neither the name of the Open Source and Linux Lab nor the | |
13 | * names of its contributors may be used to endorse or promote products | |
14 | * derived from this software without specific prior written permission. | |
15 | * | |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY | |
20 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | |
23 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |
25 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
26 | */ | |
27 | ||
28 | #include "cpu.h" | |
29 | #include "helper.h" | |
30 | #include "qemu/host-utils.h" | |
31 | #include "exec/softmmu_exec.h" | |
32 | ||
33 | static void do_unaligned_access(CPUXtensaState *env, | |
34 | target_ulong addr, int is_write, int is_user, uintptr_t retaddr); | |
35 | ||
/*
 * Instantiate the softmmu load/store helpers for all four access sizes.
 * ALIGNED_ONLY makes the generated helpers call do_unaligned_access()
 * (forward-declared above) on misaligned accesses; MMUSUFFIX selects the
 * "_mmu" helper name suffix. SHIFT is log2 of the access size and is
 * redefined before each inclusion of the template.
 */
#define ALIGNED_ONLY
#define MMUSUFFIX _mmu

#define SHIFT 0 /* 1-byte accesses */
#include "exec/softmmu_template.h"

#define SHIFT 1 /* 2-byte accesses */
#include "exec/softmmu_template.h"

#define SHIFT 2 /* 4-byte accesses */
#include "exec/softmmu_template.h"

#define SHIFT 3 /* 8-byte accesses */
#include "exec/softmmu_template.h"
50 | ||
/*
 * Called by the softmmu template (ALIGNED_ONLY) for a misaligned access.
 * Raises LoadStoreAlignmentCause when the core is configured to trap
 * unaligned accesses and does not fix them up in hardware; otherwise the
 * access is allowed to proceed.
 */
static void do_unaligned_access(CPUXtensaState *env,
        target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
            !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        /* resynchronize guest state from the host return address first */
        cpu_restore_state(env, retaddr);
        HELPER(exception_cause_vaddr)(env,
                env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}
61 | ||
62 | void tlb_fill(CPUXtensaState *env, | |
63 | target_ulong vaddr, int is_write, int mmu_idx, uintptr_t retaddr) | |
64 | { | |
65 | uint32_t paddr; | |
66 | uint32_t page_size; | |
67 | unsigned access; | |
68 | int ret = xtensa_get_physical_addr(env, true, vaddr, is_write, mmu_idx, | |
69 | &paddr, &page_size, &access); | |
70 | ||
71 | qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__, | |
72 | vaddr, is_write, mmu_idx, paddr, ret); | |
73 | ||
74 | if (ret == 0) { | |
75 | tlb_set_page(env, | |
76 | vaddr & TARGET_PAGE_MASK, | |
77 | paddr & TARGET_PAGE_MASK, | |
78 | access, mmu_idx, page_size); | |
79 | } else { | |
80 | cpu_restore_state(env, retaddr); | |
81 | HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr); | |
82 | } | |
83 | } | |
84 | ||
85 | static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr) | |
86 | { | |
87 | uint32_t paddr; | |
88 | uint32_t page_size; | |
89 | unsigned access; | |
90 | int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0, | |
91 | &paddr, &page_size, &access); | |
92 | if (ret == 0) { | |
93 | tb_invalidate_phys_addr(paddr); | |
94 | } | |
95 | } | |
96 | ||
/*
 * Raise QEMU exception excp and leave the translated-code loop.
 * Does not return. For EXCP_DEBUG the exception_taken flag is cleared
 * before exiting the loop.
 */
void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    env->exception_index = excp;
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(env);
}
105 | ||
/*
 * Raise an Xtensa exception with the given cause code at pc.
 *
 * If PS.EXCM is already set this is a double exception: the return pc is
 * saved to DEPC when the core has one, otherwise to EPC1, and the double
 * exception vector is used. Otherwise EPC1 gets pc and the user/kernel
 * vector is selected by PS.UM. EXCCAUSE and PS.EXCM are updated, then
 * control transfers via HELPER(exception), which does not return.
 */
void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        /* exception raised while handling an exception -> double */
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}
128 | ||
/*
 * As HELPER(exception_cause), but also latch the faulting virtual
 * address into EXCVADDR. Does not return.
 */
void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
        uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}
135 | ||
/*
 * Raise a debug exception at env->pc, but only when the current
 * interrupt level is below the core's debug level (i.e. debug
 * exceptions are not masked).
 */
void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}
142 | ||
/*
 * Take a debug exception: save pc to EPC[debug_level] and PS to
 * EPS[debug_level], record the cause in DEBUGCAUSE, raise PS.INTLEVEL
 * to the debug level with PS.EXCM set, then raise EXC_DEBUG (does not
 * return). EPC1/EPS2 are the bases of the per-level register arrays,
 * hence the "- 1" / "- 2" index adjustments.
 */
void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}
155 | ||
156 | uint32_t HELPER(nsa)(uint32_t v) | |
157 | { | |
158 | if (v & 0x80000000) { | |
159 | v = ~v; | |
160 | } | |
161 | return v ? clz32(v) - 1 : 31; | |
162 | } | |
163 | ||
164 | uint32_t HELPER(nsau)(uint32_t v) | |
165 | { | |
166 | return v ? clz32(v) : 32; | |
167 | } | |
168 | ||
169 | static void copy_window_from_phys(CPUXtensaState *env, | |
170 | uint32_t window, uint32_t phys, uint32_t n) | |
171 | { | |
172 | assert(phys < env->config->nareg); | |
173 | if (phys + n <= env->config->nareg) { | |
174 | memcpy(env->regs + window, env->phys_regs + phys, | |
175 | n * sizeof(uint32_t)); | |
176 | } else { | |
177 | uint32_t n1 = env->config->nareg - phys; | |
178 | memcpy(env->regs + window, env->phys_regs + phys, | |
179 | n1 * sizeof(uint32_t)); | |
180 | memcpy(env->regs + window + n1, env->phys_regs, | |
181 | (n - n1) * sizeof(uint32_t)); | |
182 | } | |
183 | } | |
184 | ||
185 | static void copy_phys_from_window(CPUXtensaState *env, | |
186 | uint32_t phys, uint32_t window, uint32_t n) | |
187 | { | |
188 | assert(phys < env->config->nareg); | |
189 | if (phys + n <= env->config->nareg) { | |
190 | memcpy(env->phys_regs + phys, env->regs + window, | |
191 | n * sizeof(uint32_t)); | |
192 | } else { | |
193 | uint32_t n1 = env->config->nareg - phys; | |
194 | memcpy(env->phys_regs + phys, env->regs + window, | |
195 | n1 * sizeof(uint32_t)); | |
196 | memcpy(env->phys_regs, env->regs + window + n1, | |
197 | (n - n1) * sizeof(uint32_t)); | |
198 | } | |
199 | } | |
200 | ||
201 | ||
202 | static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env) | |
203 | { | |
204 | return a & (env->config->nareg / 4 - 1); | |
205 | } | |
206 | ||
/* WINDOWSTART bit corresponding to window base a (taken modulo nareg/4). */
static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}
211 | ||
/* Refresh the 16 visible AR registers from the physical register file. */
void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}
216 | ||
/* Write the 16 visible AR registers back to the physical register file. */
void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}
221 | ||
/*
 * Move WINDOW_BASE to an absolute position: flush the current visible
 * window to the physical file, update the base (bounded modulo the
 * number of window positions), then reload the visible window.
 */
static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}
228 | ||
/* Rotate the register window by a (possibly negative) delta. */
static void rotate_window(CPUXtensaState *env, uint32_t delta)
{
    rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}
233 | ||
/* WSR.WINDOWBASE: rotate the window to the absolute base v. */
void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    rotate_window_abs(env, v);
}
238 | ||
/*
 * ENTRY instruction: open a new register window frame.
 * s selects the stack register (must be a0..a3), imm is the frame size
 * in 8-byte units. Illegal unless windowing is active (PS.WOE set,
 * PS.EXCM clear). On success the decremented stack pointer is stored
 * into the register that will become a[s] after rotating by PS.CALLINC,
 * the window rotates, and the new frame is marked live in WINDOWSTART.
 */
void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log("Illegal entry instruction(pc = %08x), PS = %08x\n",
                pc, env->sregs[PS]);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        /* (callinc << 2) | s addresses the post-rotation a[s] */
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
        rotate_window(env, callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}
253 | ||
/*
 * Window overflow check: called when an instruction accesses registers
 * w windows above the current frame. A no-op when window exceptions
 * are disabled (PS.WOE clear or PS.EXCM set). Otherwise, find the
 * nearest live frame (WINDOWSTART bit) within the next w window bases;
 * if one exists, rotate to it, save the old base in PS.OWB, set
 * PS.EXCM, record pc in EPC1 and raise the Overflow4/8/12 exception
 * selected by the call increment of the frame being spilled (which of
 * the windowstart bits at m+1/m+2 is set).
 */
void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t m, n;

    if ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) {
        return;
    }

    /* n = distance to the closest live frame above us, if within w */
    for (n = 1; ; ++n) {
        if (n > w) {
            return;
        }
        if (windowstart & windowstart_bit(windowbase + n, env)) {
            break;
        }
    }

    m = windowbase_bound(windowbase + n, env);
    rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    if (windowstart & windowstart_bit(m + 1, env)) {
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
    } else if (windowstart & windowstart_bit(m + 2, env)) {
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
    } else {
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
    }
}
287 | ||
/*
 * RETW instruction: return through the register window.
 * n is the caller's call increment encoded in a0's top two bits; m is
 * the distance to the nearest live caller frame found in WINDOWSTART.
 * The instruction is illegal when n is 0, when m and n disagree, or
 * when windowing is disabled. Otherwise the return pc combines the
 * current pc's 1GB region with a0's 30-bit offset, and the window
 * rotates back by n. If the caller's frame is live, the current frame's
 * WINDOWSTART bit is simply cleared; if not, a window underflow
 * exception (4/8/12 by n) is raised with PS.OWB recording the old base.
 * Returns the target pc (0 on the exception paths, which do not return).
 */
uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n) ||
        ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log("Illegal retw instruction(pc = %08x), "
                "PS = %08x, m = %d, n = %d\n",
                pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        /* return address: current 1GB region + 30-bit offset from a0 */
        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        rotate_window(env, -n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            /* caller frame is live: retire the callee's frame */
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}
335 | ||
/* ROTW instruction: rotate the window by the signed immediate imm4. */
void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    rotate_window(env, imm4);
}
340 | ||
/* Restore WINDOW_BASE from the PS.OWB field saved at exception entry. */
void HELPER(restore_owb)(CPUXtensaState *env)
{
    rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}
345 | ||
346 | void HELPER(movsp)(CPUXtensaState *env, uint32_t pc) | |
347 | { | |
348 | if ((env->sregs[WINDOW_START] & | |
349 | (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) | | |
350 | windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) | | |
351 | windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) { | |
352 | HELPER(exception_cause)(env, pc, ALLOCA_CAUSE); | |
353 | } | |
354 | } | |
355 | ||
356 | void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v) | |
357 | { | |
358 | if (env->sregs[LBEG] != v) { | |
359 | tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1); | |
360 | env->sregs[LBEG] = v; | |
361 | } | |
362 | } | |
363 | ||
/*
 * WSR.LEND: both the old and the new loop-end translated blocks embed
 * loop state, so invalidate the old one before updating LEND and the
 * new one after.
 */
void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}
372 | ||
/* Debug helper: dump the full CPU state to stderr. */
void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}
379 | ||
/*
 * WAITI instruction: set PS.INTLEVEL to intlevel and wait for an
 * interrupt. If one is already pending after the level change, just
 * leave the cpu loop so it gets serviced. Otherwise record the halt
 * time, halt the CPU, rearm the CCOMPARE timer when the core has the
 * timer interrupt option, and raise EXCP_HLT (does not return).
 */
void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);
    check_interrupts(env);
    if (env->pending_irq_level) {
        cpu_loop_exit(env);
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    cpu->halted = 1;
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
        xtensa_rearm_ccompare_timer(env);
    }
    HELPER(exception)(env, EXCP_HLT);
}
401 | ||
/* TCG-callable wrapper: assert/deassert timer interrupt line id. */
void HELPER(timer_irq)(CPUXtensaState *env, uint32_t id, uint32_t active)
{
    xtensa_timer_irq(env, id, active);
}
406 | ||
/* TCG-callable wrapper: advance the cycle counter CCOUNT by d. */
void HELPER(advance_ccount)(CPUXtensaState *env, uint32_t d)
{
    xtensa_advance_ccount(env, d);
}
411 | ||
/* TCG-callable wrapper: re-evaluate pending interrupts after SR writes. */
void HELPER(check_interrupts)(CPUXtensaState *env)
{
    check_interrupts(env);
}
416 | ||
/*!
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
            xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
        (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        /* does not return */
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    /*
     * ATOMCTL holds a 2-bit policy field per cache attribute; the
     * fall-through shifts select the field for this page's attribute
     * (WB, WT, or bypass). A zero field means atomics are forbidden.
     */
    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                    LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        /* atomics to isolate pages always fail */
        HELPER(exception_cause_vaddr)(env, pc,
                LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}
475 | ||
476 | void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v) | |
477 | { | |
478 | v = (v & 0xffffff00) | 0x1; | |
479 | if (v != env->sregs[RASID]) { | |
480 | env->sregs[RASID] = v; | |
481 | tlb_flush(env, 1); | |
482 | } | |
483 | } | |
484 | ||
485 | static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way) | |
486 | { | |
487 | uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG]; | |
488 | ||
489 | switch (way) { | |
490 | case 4: | |
491 | return (tlbcfg >> 16) & 0x3; | |
492 | ||
493 | case 5: | |
494 | return (tlbcfg >> 20) & 0x1; | |
495 | ||
496 | case 6: | |
497 | return (tlbcfg >> 24) & 0x1; | |
498 | ||
499 | default: | |
500 | return 0; | |
501 | } | |
502 | } | |
503 | ||
/*!
 * Get bit mask for the virtual address bits translated by the TLB way.
 * For the MMU option, ways 4..6 may have configurable page sizes (the
 * mask widens/narrows with get_page_size()); all other ways use 4KB
 * pages. Without the MMU option, region protection/translation uses
 * fixed 512MB regions.
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            /* page size 0..3 -> 1MB..64MB pages */
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            /* 4KB pages on all remaining ways */
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}
539 | ||
/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        /* refill ways: index width depends on the number of entries */
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        /* shift off the entry-index bits below the VPN */
        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}
568 | ||
/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way. The entry-index field position within v
 * depends on the way and, for ways 4..6, on the configured page size.
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        /* the ITLB only has 8 ways */
        wi &= 7;
    }

    if (wi < 4) {
        /* refill ways: 3- or 2-bit index at bit 12 */
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}
623 | ||
/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format.
 * Without the MMU option, region protection uses a single way with
 * the 512MB region number as the entry index.
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}
640 | ||
641 | static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env, | |
642 | uint32_t v, bool dtlb, uint32_t *pwi) | |
643 | { | |
644 | uint32_t vpn; | |
645 | uint32_t wi; | |
646 | uint32_t ei; | |
647 | ||
648 | split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei); | |
649 | if (pwi) { | |
650 | *pwi = wi; | |
651 | } | |
652 | return xtensa_tlb_get_entry(env, dtlb, wi, ei); | |
653 | } | |
654 | ||
/*
 * RITLB0/RDTLB0: read the "VPN and ASID" half of a TLB entry.
 * Without the MMU option there is no ASID; the region VPN is returned.
 */
uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}
665 | ||
/* RITLB1/RDTLB1: read the "PPN and attributes" half of a TLB entry. */
uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}
671 | ||
/*
 * IITLB/IDTLB: invalidate one TLB entry. Only variable entries with a
 * non-zero ASID can be invalidated (asid == 0 marks an invalid entry);
 * the QEMU TLB page for the entry's vaddr is flushed first.
 */
void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(env, entry->vaddr);
            entry->asid = 0;
        }
    }
}
683 | ||
/*
 * PITLB/PDTLB: probe the TLB for virtual address v.
 * On a single hit visible at the current ring, return the page number
 * with the way index and a "hit" flag bit (0x10 for DTLB, 0x8 for
 * ITLB). A multi-hit raises the corresponding exception; a miss (or a
 * hit in a more privileged ring) returns 0. Without the MMU option,
 * the region number with bit 0 set is returned.
 */
uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}
709 | ||
/*
 * Fill one TLB entry from a VPN/PTE pair (MMU option format):
 * paddr takes the PTE bits covered by the way's address mask, the ASID
 * is selected from RASID by the PTE's ring field, attr is the PTE's
 * low 4 attribute bits.
 */
void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
        xtensa_tlb_entry *entry, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}
719 | ||
/*
 * Write a TLB entry (WITLB/WDTLB back end). With the MMU option only
 * variable entries may be written; the QEMU TLB page for both the old
 * and the new mapping is flushed. Without the MMU option only the
 * attributes (and, with region translation, the paddr) are updated.
 */
void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                /* drop the old mapping before it is overwritten */
                tlb_flush_page(env, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(env, entry->vaddr);
        } else {
            qemu_log("%s %d, %d, %d trying to set immutable entry\n",
                    __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(env, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}
745 | ||
746 | void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb) | |
747 | { | |
748 | uint32_t vpn; | |
749 | uint32_t wi; | |
750 | uint32_t ei; | |
751 | split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei); | |
752 | xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p); | |
753 | } | |
754 | ||
755 | ||
756 | void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v) | |
757 | { | |
758 | uint32_t change = v ^ env->sregs[IBREAKENABLE]; | |
759 | unsigned i; | |
760 | ||
761 | for (i = 0; i < env->config->nibreak; ++i) { | |
762 | if (change & (1 << i)) { | |
763 | tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]); | |
764 | } | |
765 | } | |
766 | env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1); | |
767 | } | |
768 | ||
/*
 * WSR.IBREAKA[i]: when the breakpoint is enabled and the address
 * actually changes, retranslate the blocks at both the old and the new
 * address so the breakpoint check moves with it.
 */
void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}
777 | ||
/*
 * (Re)install hardware data breakpoint i as a QEMU watchpoint.
 * DBREAKC supplies the address mask and the load/store match bits;
 * the watchpoint covers the masked address range. A non-contiguous
 * mask cannot be represented and is truncated at its first zero bit.
 */
static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
        uint32_t dbreakc)
{
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log("DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(env, dbreaka & mask, ~mask + 1,
                flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log("Failed to set data breakpoint at 0x%08x/%d\n",
                dbreaka & mask, ~mask + 1);
    }
}
806 | ||
/*
 * WSR.DBREAKA[i]: when the breakpoint is armed (either SB or LB set in
 * DBREAKC) and the address changes, reinstall the watchpoint at the
 * new address.
 */
void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
        env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}
817 | ||
/*
 * WSR.DBREAKC[i]: on any change to the match bits or the mask,
 * reinstall the watchpoint when the breakpoint stays armed, or remove
 * it when both SB and LB become clear.
 */
void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}
832 | ||
/*
 * WUR.FCR: store the FP control register (reserved bits masked off)
 * and propagate its 2-bit rounding-mode field to the softfloat status.
 */
void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    /* index 0..3 matches the FCR rounding-mode encoding */
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}
845 | ||
/* ABS.S: single-precision absolute value (clears the sign bit). */
float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}
850 | ||
/* NEG.S: single-precision negation (flips the sign bit). */
float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}
855 | ||
/* ADD.S: single-precision addition using the env's FP status/rounding. */
float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}
860 | ||
/* SUB.S: single-precision subtraction using the env's FP status/rounding. */
float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}
865 | ||
/* MUL.S: single-precision multiplication using the env's FP status. */
float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}
870 | ||
/* MADD.S: fused multiply-add, a + b * c (softfloat muladd order b,c,a). */
float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
            &env->fp_status);
}
876 | ||
/* MSUB.S: fused multiply-subtract, a - b * c (negated product). */
float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
            &env->fp_status);
}
882 | ||
883 | uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale) | |
884 | { | |
885 | float_status fp_status = {0}; | |
886 | ||
887 | set_float_rounding_mode(rounding_mode, &fp_status); | |
888 | return float32_to_int32( | |
889 | float32_scalbn(v, scale, &fp_status), &fp_status); | |
890 | } | |
891 | ||
/*
 * Float-to-unsigned-int conversion with explicit rounding mode and a
 * power-of-two scale applied first. Negative non-NaN inputs go through
 * the signed conversion path instead of the unsigned one.
 */
uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}
907 | ||
/* Signed-int-to-float conversion scaled by 2^scale (scale is signed). */
float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}
913 | ||
/* Unsigned-int-to-float conversion scaled by 2^scale (scale is signed). */
float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}
919 | ||
920 | static inline void set_br(CPUXtensaState *env, bool v, uint32_t br) | |
921 | { | |
922 | if (v) { | |
923 | env->sregs[BR] |= br; | |
924 | } else { | |
925 | env->sregs[BR] &= ~br; | |
926 | } | |
927 | } | |
928 | ||
/* UN.S: set br iff a and b are unordered (either is NaN); quiet compare. */
void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}
933 | ||
/* OEQ.S: set br iff a == b (ordered equal); quiet compare. */
void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}
938 | ||
/* UEQ.S: set br iff a == b or the operands are unordered. */
void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}
944 | ||
/* OLT.S: set br iff a < b (ordered less-than); quiet compare. */
void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}
949 | ||
/* ULT.S: set br iff a < b or the operands are unordered. */
void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}
955 | ||
/* OLE.S: set br iff a <= b (ordered less-or-equal); quiet compare. */
void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}
960 | ||
/* ULE.S: set br iff a <= b or unordered, i.e. NOT ordered greater-than. */
void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}