]> git.proxmox.com Git - mirror_qemu.git/blob - target/mips/op_helper.c
target/mips: Update ITU to utilize SAARI and SAAR CP0 registers
[mirror_qemu.git] / target / mips / op_helper.c
1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "internal.h"
23 #include "qemu/host-utils.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "sysemu/kvm.h"
28
29 /*****************************************************************************/
30 /* Exceptions processing helpers */
31
/* Raise @exception with an attached @error_code.  The 0 passed as the
   final argument means "no host return address": guest state is assumed
   already synchronized by the caller.  */
void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                int error_code)
{
    do_raise_exception_err(env, exception, error_code, 0);
}

/* Raise @exception, unwinding guest state from the TCG call site
   (GETPC() captures the host return address for restoration).  */
void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, GETPC());
}

/* Raise a debug exception; no unwinding needed (retaddr 0).  */
void helper_raise_exception_debug(CPUMIPSState *env)
{
    do_raise_exception(env, EXCP_DEBUG, 0);
}

/* File-local convenience wrapper: raise @exception without unwinding.  */
static void raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, 0);
}
52
#if defined(CONFIG_USER_ONLY)
/*
 * Generate a load accessor do_<name>().  In user-only mode there is a
 * single flat address space, so @mem_idx is accepted but ignored.
 */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
                             int mem_idx, uintptr_t retaddr)            \
{                                                                       \
    return (type) cpu_##insn##_data_ra(env, addr, retaddr);             \
}
#else
/*
 * System mode: dispatch on @mem_idx to the kernel (0), supervisor (1),
 * user (2, also the default) or error (3) address-space accessor.
 */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
                             int mem_idx, uintptr_t retaddr)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr);   \
    case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr);    \
    default:                                                            \
    case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr);     \
    case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr);    \
    }                                                                   \
}
#endif
/* do_lw: 32-bit load; do_ld (MIPS64 only): 64-bit load.  */
HELPER_LD(lw, ldl, int32_t)
#if defined(TARGET_MIPS64)
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD

#if defined(CONFIG_USER_ONLY)
/* Store counterpart of HELPER_LD; @mem_idx ignored in user-only mode.  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
                             type val, int mem_idx, uintptr_t retaddr)  \
{                                                                       \
    cpu_##insn##_data_ra(env, addr, val, retaddr);                      \
}
#else
/* Store counterpart of HELPER_LD: dispatch on the MMU index.  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
                             type val, int mem_idx, uintptr_t retaddr)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: cpu_##insn##_kernel_ra(env, addr, val, retaddr); break;     \
    case 1: cpu_##insn##_super_ra(env, addr, val, retaddr); break;      \
    default:                                                            \
    case 2: cpu_##insn##_user_ra(env, addr, val, retaddr); break;       \
    case 3:                                                             \
        cpu_##insn##_error_ra(env, addr, val, retaddr);                 \
        break;                                                          \
    }                                                                   \
}
#endif
/* do_sb/do_sw: byte/word stores; do_sd (MIPS64 only): doubleword.  */
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#if defined(TARGET_MIPS64)
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
111
/* 64 bits arithmetic for 32 bits hosts */

/* Read HI:LO of TC 0 as one 64-bit value (HI = upper 32, LO = lower 32).  */
static inline uint64_t get_HILO(CPUMIPSState *env)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Split @HILO into HI[0]/LO[0] (each sign-extended through the int32_t
   cast on 64-bit targets) and return the new HI value.  */
static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

/* As set_HIT0_LO(), but return the new LO value instead of HI.  */
static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
{
    target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
    return tmp;
}
130
/* Multiplication variants of the vr54xx.
 *
 * Naming scheme: mul = plain multiply, mac = multiply-accumulate into
 * HI:LO, msac = multiply-subtract from HI:LO; trailing "s" = negate the
 * product, "u" = unsigned operands, "hi" = the result written to the
 * destination register is HI (set_HIT0_LO) rather than LO (set_HI_LOT0).
 * All products are formed as 32x32 -> 64 bit.
 */
target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
                                 (int64_t)(int32_t)arg2));
}

target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}

target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
}

target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}

target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}
228
229 static inline target_ulong bitswap(target_ulong v)
230 {
231 v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
232 ((v & (target_ulong)0x5555555555555555ULL) << 1);
233 v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
234 ((v & (target_ulong)0x3333333333333333ULL) << 2);
235 v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
236 ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
237 return v;
238 }
239
240 #ifdef TARGET_MIPS64
241 target_ulong helper_dbitswap(target_ulong rt)
242 {
243 return bitswap(rt);
244 }
245 #endif
246
247 target_ulong helper_bitswap(target_ulong rt)
248 {
249 return (int32_t)bitswap(rt);
250 }
251
/*
 * ROTX-style bit rearrangement: @rs is replicated into both halves of a
 * 64-bit word, which is then passed through five butterfly stages that
 * conditionally copy a bit from distance 16, 8, 4, 2 and 1 away,
 * selecting @shift or @shiftx per bit group (with @stripe optionally
 * inverting the selection on alternate groups).  The low 32 bits of the
 * final stage are the result, sign-extended.
 * NOTE(review): stage structure follows the hardware network; exact
 * instruction semantics should be confirmed against the nanoMIPS/ROTX
 * specification.
 */
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
                         uint32_t stripe)
{
    int i;
    /* Replicate the 32-bit source into both 32-bit halves.  */
    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
    uint64_t tmp1 = tmp0;
    /* Stage 1: distance-16 exchange, selector taken from bit 4 of s.  */
    for (i = 0; i <= 46; i++) {
        int s;
        if (i & 0x8) {
            s = shift;
        } else {
            s = shiftx;
        }

        /* Striped mode inverts the selector on alternate 4-bit groups.  */
        if (stripe != 0 && !(i & 0x4)) {
            s = ~s;
        }
        if (s & 0x10) {
            if (tmp0 & (1LL << (i + 16))) {
                tmp1 |= 1LL << i;
            } else {
                tmp1 &= ~(1LL << i);
            }
        }
    }

    /* Stage 2: distance-8 exchange, selector from bit 3 of s.  */
    uint64_t tmp2 = tmp1;
    for (i = 0; i <= 38; i++) {
        int s;
        if (i & 0x4) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (s & 0x8) {
            if (tmp1 & (1LL << (i + 8))) {
                tmp2 |= 1LL << i;
            } else {
                tmp2 &= ~(1LL << i);
            }
        }
    }

    /* Stage 3: distance-4 exchange, selector from bit 2 of s.  */
    uint64_t tmp3 = tmp2;
    for (i = 0; i <= 34; i++) {
        int s;
        if (i & 0x2) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x4) {
            if (tmp2 & (1LL << (i + 4))) {
                tmp3 |= 1LL << i;
            } else {
                tmp3 &= ~(1LL << i);
            }
        }
    }

    /* Stage 4: distance-2 exchange, selector from bit 1 of s.  */
    uint64_t tmp4 = tmp3;
    for (i = 0; i <= 32; i++) {
        int s;
        if (i & 0x1) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x2) {
            if (tmp3 & (1LL << (i + 2))) {
                tmp4 |= 1LL << i;
            } else {
                tmp4 &= ~(1LL << i);
            }
        }
    }

    /* Stage 5: distance-1 exchange; only @shift is consulted here.  */
    uint64_t tmp5 = tmp4;
    for (i = 0; i <= 31; i++) {
        int s;
        s = shift;
        if (s & 0x1) {
            if (tmp4 & (1LL << (i + 1))) {
                tmp5 |= 1LL << i;
            } else {
                tmp5 &= ~(1LL << i);
            }
        }
    }

    /* Return the low word, sign-extended to target_ulong width.  */
    return (int64_t)(int32_t)(uint32_t)tmp5;
}
345
#ifndef CONFIG_USER_ONLY

/*
 * Translate a guest virtual @address for the LL/SC helpers.
 * On translation failure (-1), unwind back to the guest via
 * cpu_loop_exit_restore(), which does not return — presumably the
 * translation routine has already recorded the fault for delivery
 * (NOTE(review): confirm against cpu_mips_translate_address()).
 */
static inline hwaddr do_translate_address(CPUMIPSState *env,
                                          target_ulong address,
                                          int rw, uintptr_t retaddr)
{
    hwaddr lladdr;
    CPUState *cs = CPU(mips_env_get_cpu(env));

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        /* Does not return.  */
        cpu_loop_exit_restore(cs, retaddr);
    } else {
        return lladdr;
    }
}

/*
 * Load-linked: check alignment (@almask), record CP0_BadVAddr (unless in
 * debug mode) and raise AdEL on misalignment; otherwise remember the
 * translated address and the loaded value for a later SC to compare.
 */
#define HELPER_LD_ATOMIC(name, insn, almask)                                  \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
{                                                                             \
    if (arg & almask) {                                                       \
        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
            env->CP0_BadVAddr = arg;                                          \
        }                                                                     \
        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
    }                                                                         \
    env->lladdr = do_translate_address(env, arg, 0, GETPC());                 \
    env->llval = do_##insn(env, arg, mem_idx, GETPC());                       \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw, 0x3)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld, 0x7)
#endif
#undef HELPER_LD_ATOMIC

/*
 * Store-conditional: after the alignment check (AdES), succeed (store
 * and return 1) only if the address still matches the linked address
 * and the memory word still holds the linked value; otherwise return 0.
 */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1,              \
                           target_ulong arg2, int mem_idx)                    \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
            env->CP0_BadVAddr = arg2;                                         \
        }                                                                     \
        do_raise_exception(env, EXCP_AdES, GETPC());                          \
    }                                                                         \
    if (do_translate_address(env, arg2, 1, GETPC()) == env->lladdr) {         \
        tmp = do_##ld_insn(env, arg2, mem_idx, GETPC());                      \
        if (tmp == env->llval) {                                              \
            do_##st_insn(env, arg2, arg1, mem_idx, GETPC());                  \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
#endif
410
/* GET_LMASK: offset of the addressed byte within its aligned word,
   normalized so the same store sequences work for both endiannesses;
   GET_OFFSET steps towards the other end of the word accordingly.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif

/* SWL: store the most-significant bytes of @arg1 ending at the word
   boundary, one byte at a time from the addressed byte onward.  */
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());

    if (GET_LMASK(arg2) <= 2) {
        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) <= 1) {
        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) == 0) {
        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
              GETPC());
    }
}

/* SWR: store the least-significant bytes of @arg1 starting at the
   addressed byte, walking back towards the word boundary.  */
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK(arg2) >= 1) {
        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) >= 2) {
        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) == 3) {
        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
              GETPC());
    }
}
460
#if defined(TARGET_MIPS64)
/* "half" load and stores. We must do the memory access inline,
   or fault handling won't work. */

/* 64-bit analogue of GET_LMASK: byte offset within the aligned
   doubleword, endianness-normalized.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif

/* SDL: 64-bit version of SWL — store the high-order bytes of @arg1
   from the addressed byte to the doubleword boundary.  */
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());

    if (GET_LMASK64(arg2) <= 6) {
        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 5) {
        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 4) {
        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 3) {
        do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 2) {
        do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 1) {
        do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 0) {
        do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
              GETPC());
    }
}

/* SDR: 64-bit version of SWR — store the low-order bytes of @arg1
   from the addressed byte back to the doubleword boundary.  */
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK64(arg2) >= 1) {
        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 2) {
        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 3) {
        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 4) {
        do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 5) {
        do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 6) {
        do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) == 7) {
        do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
              GETPC());
    }
}
#endif /* TARGET_MIPS64 */
553
/* GPR numbers covered by the LWM/SWM/LDM/SDM register-list encoding:
   s0-s7 ($16-$23) and fp ($30); ra ($31) is encoded separately.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };

/*
 * LWM: load @reglist registers from consecutive words at @addr.
 * Bits 3..0 give the count of multiple_regs[] entries to load;
 * bit 4 requests an additional load into $31 (ra).
 */
void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            /* Each word is sign-extended into the GPR.  */
            env->active_tc.gpr[multiple_regs[i]] =
                (target_long)do_lw(env, addr, mem_idx, GETPC());
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
                                                    GETPC());
    }
}

/* SWM: store counterpart of helper_lwm(), same @reglist encoding.  */
void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
                  GETPC());
            addr += 4;
        }
    }

    if (do_r31) {
        do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
598
#if defined(TARGET_MIPS64)
/* LDM: 64-bit version of helper_lwm() — doubleword loads, stride 8.  */
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
                                                         GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
    }
}

/* SDM: 64-bit version of helper_swm() — doubleword stores, stride 8.  */
void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
                  GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
#endif
642
#ifndef CONFIG_USER_ONLY
/* SMP helpers. */

/* True if VPE @c is halted but otherwise active, i.e. waiting for an
   interrupt (wfi).  */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /* If the VPE is halted but otherwise active, it means it's waiting for
       an interrupt. */
    return cpu->halted && mips_vpe_active(env);
}

/* VP (virtual processor) variant of mips_vpe_is_wfi().  */
static bool mips_vp_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    return cpu->halted && mips_vp_active(env);
}

/* Request a wakeup of VPE @c via a _WAKE interrupt.  */
static inline void mips_vpe_wake(MIPSCPU *c)
{
    /* Don't set ->halted = 0 directly, let it be done via cpu_has_work
       because there might be other conditions that state that c should
       be sleeping. */
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
}

/* Put VPE @cpu to sleep and cancel any pending wakeup request.  */
static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The VPE was shut off, really go to bed.
       Reset any old _WAKE requests. */
    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
}

/* Wake the VPE hosting TC @tc if the VPE is active and not merely
   waiting for an interrupt.  @tc is currently unused.  */
static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(cpu);
    }
}

/* Put the VPE hosting TC @tc to sleep once the VPE is no longer
   active.  @tc is currently unused.  */
static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}
700
/**
 * mips_cpu_map_tc:
 * @env: CPU from which mapping is performed.
 * @tc: Should point to an int with the value of the global TC index.
 *
 * This function will transform @tc into a local index within the
 * returned #CPUMIPSState.
 */
/* FIXME: This code assumes that all VPEs have the same number of TCs,
          which depends on runtime setup. Can probably be fixed by
          walking the list of CPUMIPSStates. */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    /* Split the global TC index into a VPE index and a TC index local
       to that VPE (assumes equal nr_threads per VPE, see FIXME).  */
    cs = CPU(mips_env_get_cpu(env));
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        /* Target VPE doesn't exist; fall back to the current CPU.  */
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}
736
/* The per VPE CP0_Status register shares some fields with the per TC
   CP0_TCStatus registers. These fields are wired to the same registers,
   so changes to either of them should be reflected on both registers.

   Also, EntryHi shares the bottom 8 bit ASID with TCStatus.

   These helpers synchronize the registers for a given cpu. */

/* Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. */
/* static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
                                     int tc); */

/* Called for updates to CP0_TCStatus.
   Propagate the TCU/TMX/TKSU fields of the new TCStatus value @v into
   CP0_Status, and its ASID into CP0_EntryHi, then recompute hflags.
   NOTE(review): @tc is currently unused here.  */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    /* Status fields mirrored from TCStatus: CU3..CU0, MX and KSU.  */
    uint32_t mask = ((1U << CP0St_CU3)
                     | (1 << CP0St_CU2)
                     | (1 << CP0St_CU1)
                     | (1 << CP0St_CU0)
                     | (1 << CP0St_MX)
                     | (3 << CP0St_KSU));

    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi.  */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}

/* Called for updates to CP0_EntryHi.
   Copy the new EntryHi ASID into TC @tc's TCStatus ASID field.  */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & cpu->CP0_EntryHi_ASID_mask;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
    *tcst |= asid;
}
798
/* CP0 helpers */

/*
 * CP0 register read helpers.  The mfc0_* variants read the register of
 * the current TC/VPE; the mftc0_* variants read the register of the TC
 * selected by VPEControl.TargTC, after mapping it to the owning VPE via
 * mips_cpu_map_tc().  TargTC occupies the low bits of VPEControl, so
 * masking without shifting yields the index.
 */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}

/* Random: next TLB replacement index (sign-extended to 32 bits).  */
target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}

target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* The target TC's state lives in active_tc only while scheduled.  */
    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCStatus;
    else
        return other->tcs[other_tc].CP0_TCStatus;
}

target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCBind;
    else
        return other->tcs[other_tc].CP0_TCBind;
}

/* TCRestart is modelled as the TC's PC.  */
target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.PC;
    else
        return other->tcs[other_tc].PC;
}

target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCHalt;
    else
        return other->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCContext;
    else
        return other->tcs[other_tc].CP0_TCContext;
}

target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCSchedule;
    else
        return other->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCScheFBack;
    else
        return other->tcs[other_tc].CP0_TCScheFBack;
}
931
/* Count: reading the timer requires the iothread (BQL) lock because the
   count is derived from the shared virtual clock.  */
target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    int32_t count;
    qemu_mutex_lock_iothread();
    count = (int32_t) cpu_mips_get_count(env);
    qemu_mutex_unlock_iothread();
    return count;
}
940
941 target_ulong helper_mfc0_saar(CPUMIPSState *env)
942 {
943 if ((env->CP0_SAARI & 0x3f) < 2) {
944 return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f];
945 }
946 return 0;
947 }
948
949 target_ulong helper_mfhc0_saar(CPUMIPSState *env)
950 {
951 if ((env->CP0_SAARI & 0x3f) < 2) {
952 return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32;
953 }
954 return 0;
955 }
956
/* MFTC0 EntryHi: EntryHi is per-VPE, so only the VPE mapping matters;
   the local TC index returned by mips_cpu_map_tc() is unused.  */
target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EntryHi;
}
964
965 target_ulong helper_mftc0_cause(CPUMIPSState *env)
966 {
967 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
968 int32_t tccause;
969 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
970
971 if (other_tc == other->current_tc) {
972 tccause = other->CP0_Cause;
973 } else {
974 tccause = other->CP0_Cause;
975 }
976
977 return tccause;
978 }
979
/* MFTC0 Status: Status is per-VPE; only the VPE mapping is needed.  */
target_ulong helper_mftc0_status(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Status;
}
987
/* LLAddr: the architectural register exposes the linked physical
   address shifted right by the implementation-defined amount.  */
target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

/* MAAR: low 32 bits (sign-extended) of the MAAR selected by MAARI.  */
target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}

/* MFHC0 MAAR: high 32 bits of the selected MAAR.  */
target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}

target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

/* NOTE(review): unlike watchlo above, no (int32_t) sign-extension cast
   here — presumably CP0_WatchHi is already 32-bit; confirm in cpu.h.  */
target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

/* Debug: reflect the live debug-mode flag in the DM bit on read.  */
target_ulong helper_mfc0_debug(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

/* MFTC0 Debug: per-VPE Debug combined with the target TC's per-TC
   SSt/Halt bits.  */
target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1037
#if defined(TARGET_MIPS64)
/* DMFC0 variants: full-width (64-bit) reads of the registers whose
   MFC0 counterparts above truncate/sign-extend to 32 bits.  */
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}

/* Full 64-bit read of the SAAR register selected by SAARI; targets
   other than 0 and 1 read as zero.  */
target_ulong helper_dmfc0_saar(CPUMIPSState *env)
{
    if ((env->CP0_SAARI & 0x3f) < 2) {
        return env->CP0_SAAR[env->CP0_SAARI & 0x3f];
    }
    return 0;
}
#endif /* TARGET_MIPS64 */
1087
/* MTC0 Index: accept the write only if the TLB index is in range.
   Pre-R6 the P (probe-failure) bit is preserved; on R6 it is writable.  */
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t index_p = env->CP0_Index & 0x80000000;
    uint32_t tlb_index = arg1 & 0x7fffffff;
    if (tlb_index < env->tlb->nb_tlb) {
        if (env->insn_flags & ISA_MIPS32R6) {
            index_p |= arg1 & 0x80000000;
        }
        env->CP0_Index = index_p | tlb_index;
    }
}
1099
/* MTC0 MVPControl: which bits are writable depends on the master-VP
   privilege (MVP) and on whether VPE configuration is enabled (VPC).  */
void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

/* MTC0 VPEControl: update the writable YSI/GSI/TE/TargTC fields.  */
void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

/* MTTC0 VPEControl: as above, but targeting the VPE that owns the TC
   selected by the current VPEControl.TargTC.  */
void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs.  */

    other->CP0_VPEControl = newval;
}

target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits. */
    return other->CP0_VPEControl;
}

target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_VPEConf0;
}
1165
1166 void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1167 {
1168 uint32_t mask = 0;
1169 uint32_t newval;
1170
1171 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1172 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1173 mask |= (0xff << CP0VPEC0_XTC);
1174 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1175 }
1176 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1177
1178 // TODO: TC exclusive handling due to ERL/EXL.
1179
1180 env->CP0_VPEConf0 = newval;
1181 }
1182
1183 void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1184 {
1185 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1186 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1187 uint32_t mask = 0;
1188 uint32_t newval;
1189
1190 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1191 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1192
1193 /* TODO: TC exclusive handling due to ERL/EXL. */
1194 other->CP0_VPEConf0 = newval;
1195 }
1196
1197 void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
1198 {
1199 uint32_t mask = 0;
1200 uint32_t newval;
1201
1202 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1203 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1204 (0xff << CP0VPEC1_NCP1);
1205 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1206
1207 /* UDI not implemented. */
1208 /* CP2 not implemented. */
1209
1210 // TODO: Handle FPU (CP1) binding.
1211
1212 env->CP0_VPEConf1 = newval;
1213 }
1214
/* YQMask write: yield qualifier inputs are not modelled, so force zero. */
void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* VPEOpt write: only the low 16 bits are retained. */
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

/* Writable EntryLo PFN/flag bits, derived from the physical address mask. */
#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)

void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    /* RI/XI are accepted only when enabled via PageGrain's XIE field. */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
/* 64-bit EntryLo: the full PFN width from PAMask is writable. */
#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)

void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    /* XIE-gated RI/XI bits, relocated into the top half of the register. */
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif
1245
1246 void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1247 {
1248 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1249 uint32_t newval;
1250
1251 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1252
1253 env->active_tc.CP0_TCStatus = newval;
1254 sync_c0_tcstatus(env, env->current_tc, newval);
1255 }
1256
1257 void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1258 {
1259 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1260 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1261
1262 if (other_tc == other->current_tc)
1263 other->active_tc.CP0_TCStatus = arg1;
1264 else
1265 other->tcs[other_tc].CP0_TCStatus = arg1;
1266 sync_c0_tcstatus(other, other_tc, arg1);
1267 }
1268
1269 void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1270 {
1271 uint32_t mask = (1 << CP0TCBd_TBE);
1272 uint32_t newval;
1273
1274 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1275 mask |= (1 << CP0TCBd_CurVPE);
1276 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1277 env->active_tc.CP0_TCBind = newval;
1278 }
1279
/* MTTC0 write of the targeted TC's TCBind (same mask rules as MTC0). */
void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    /* The targeted TC may be the one currently running on that VPE. */
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}

/*
 * Write TCRestart: sets the TC's restart PC, clears the TDS (dirty)
 * flag and drops any LL/SC link.
 */
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* MTTC0 variant of TCRestart for the targeted TC. */
void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}

/* Write TCHalt for the current TC, then put it to sleep or wake it. */
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}
1337
1338 void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1339 {
1340 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1341 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1342 MIPSCPU *other_cpu = mips_env_get_cpu(other);
1343
1344 // TODO: Halt TC / Restart (if allocated+active) TC.
1345
1346 if (other_tc == other->current_tc)
1347 other->active_tc.CP0_TCHalt = arg1;
1348 else
1349 other->tcs[other_tc].CP0_TCHalt = arg1;
1350
1351 if (arg1 & 1) {
1352 mips_tc_sleep(other_cpu, other_tc);
1353 } else {
1354 mips_tc_wake(other_cpu, other_tc);
1355 }
1356 }
1357
1358 void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1359 {
1360 env->active_tc.CP0_TCContext = arg1;
1361 }
1362
1363 void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1364 {
1365 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1366 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1367
1368 if (other_tc == other->current_tc)
1369 other->active_tc.CP0_TCContext = arg1;
1370 else
1371 other->tcs[other_tc].CP0_TCContext = arg1;
1372 }
1373
1374 void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1375 {
1376 env->active_tc.CP0_TCSchedule = arg1;
1377 }
1378
1379 void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1380 {
1381 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1382 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1383
1384 if (other_tc == other->current_tc)
1385 other->active_tc.CP0_TCSchedule = arg1;
1386 else
1387 other->tcs[other_tc].CP0_TCSchedule = arg1;
1388 }
1389
1390 void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1391 {
1392 env->active_tc.CP0_TCScheFBack = arg1;
1393 }
1394
1395 void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1396 {
1397 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1398 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1399
1400 if (other_tc == other->current_tc)
1401 other->active_tc.CP0_TCScheFBack = arg1;
1402 else
1403 other->tcs[other_tc].CP0_TCScheFBack = arg1;
1404 }
1405
/* Write CP0 EntryLo1; same masking rules as EntryLo0. */
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    /* RI/XI are accepted only when enabled via PageGrain's XIE field. */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
/* 64-bit EntryLo1 write; same masking rules as the EntryLo0 variant. */
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif
1421
1422 void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
1423 {
1424 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1425 }
1426
/*
 * Validate and apply a PageMask value.  Pre-R6 accepts anything; R6
 * only accepts all-ones or one of the architecturally legal power-of-4
 * mask patterns — an invalid write is silently ignored.
 */
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
{
    uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
    if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
        (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
         mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
         mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
        env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
    }
}

/* CP0 PageMask write helper; validation is done in update_pagemask(). */
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    update_pagemask(env, arg1, &env->CP0_PageMask);
}

/*
 * Write CP0 PageGrain, keeping read-only bits, then recompute hflags
 * and the physical address mask which both depend on it.
 */
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    restore_pamask(env);
}
1452
1453 void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
1454 {
1455 CPUState *cs = CPU(mips_env_get_cpu(env));
1456
1457 env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
1458 tlb_flush(cs);
1459 }
1460
1461 void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
1462 {
1463 CPUState *cs = CPU(mips_env_get_cpu(env));
1464
1465 env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
1466 tlb_flush(cs);
1467 }
1468
1469 void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
1470 {
1471 CPUState *cs = CPU(mips_env_get_cpu(env));
1472
1473 env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
1474 tlb_flush(cs);
1475 }
1476
/*
 * Write CP0 PWField (page-table walker field positions).  On R6, any
 * sub-field written with a value below 12 is rejected by clearing it
 * from the writable mask (keeping the old value), and a reserved
 * PTEI/PTEW value restores the previously stored one.
 */
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    if ((env->insn_flags & ISA_MIPS32R6)) {
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* PTEI >= 32, or 0/1 on R6, is reserved: keep the old PTEI value. */
    if ((new_ptei >= 32) ||
        ((env->insn_flags & ISA_MIPS32R6) &&
         (new_ptei == 0 || new_ptei == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                           (old_ptei << CP0PF_PTEI);
    }
#else
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    if ((env->insn_flags & ISA_MIPS32R6)) {
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* PTEW >= 32, or 0/1 on R6, is reserved: keep the old PTEW value. */
    if ((new_ptew >= 32) ||
        ((env->insn_flags & ISA_MIPS32R6) &&
         (new_ptew == 0 || new_ptew == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                           (old_ptew << CP0PF_PTEW);
    }
#endif
}

/* Write CP0 PWSize, masked to the defined field widths per target. */
void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}

/*
 * Write CP0 Wired.  R6 silently ignores out-of-range values; pre-R6
 * wraps the value modulo the TLB size.
 */
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS32R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}

/* Write CP0 PWCtl, preserving the per-target read-only bits. */
void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}
1568
/*
 * SRSConf0-4 writes: bits can only be set, never cleared (|=), limited
 * to each register's writable-bit mask.
 */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/*
 * Write CP0 HWREna.  The base mask covers RDHWR registers 0-3; bits 4
 * and 5 are added on R6 (bit 4 only if performance counters are
 * present), and bit 29 (UserLocal) if Config3.ULRI is set.  The ULR
 * bit is mirrored into hflags for fast RDHWR translation.
 */
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;

    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS32R6)) {
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);

        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}

/* Write CP0 Count; timer state is shared, so take the iothread lock. */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    qemu_mutex_lock_iothread();
    cpu_mips_store_count(env, arg1);
    qemu_mutex_unlock_iothread();
}
1624
1625 void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
1626 {
1627 uint32_t target = arg1 & 0x3f;
1628 if (target <= 1) {
1629 env->CP0_SAARI = target;
1630 }
1631 }
1632
/*
 * Write the low word of the SAAR register selected by SAARI.  Target 0
 * is the ITC block, so its address/enable change must be pushed to the
 * ITU via itc_reconfigure().
 */
void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    if (target < 2) {
        /* Keep only the defined address and control bits. */
        env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
        switch (target) {
        case 0:
            if (env->itu) {
                itc_reconfigure(env->itu);
            }
            break;
        }
    }
}

/* Write the high word (bits 43:32) of the selected SAAR register. */
void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    if (target < 2) {
        env->CP0_SAAR[target] =
            (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
            (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
        switch (target) {
        case 0:
            if (env->itu) {
                itc_reconfigure(env->itu);
            }
            break;
        }
    }
}
1664
/*
 * Write CP0 EntryHi.  Builds the writable-bit mask (VPN2 + ASID, EHINV
 * when TLB invalidate is supported, minus reserved R-field encodings
 * on 64-bit R6), merges the new value, keeps MT shadow state in sync,
 * and flushes QEMU's TLB when the ASID actually changes.
 */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    /* EHINV is writable only when Config4.IE >= 2. */
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS32R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(CPU(mips_env_get_cpu(env)));
    }
}

/*
 * MTTC0 EntryHi write for the targeted TC's VPE.  Note the value is
 * stored unmasked, unlike the MTC0 variant.
 */
void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

/* Write CP0 Compare; timer state is shared, so take the iothread lock. */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    qemu_mutex_lock_iothread();
    cpu_mips_store_compare(env, arg1);
    qemu_mutex_unlock_iothread();
}
1715
/*
 * Write CP0 Status via cpu_mips_store_status(), then optionally trace
 * the old/new values and the resulting MMU mode when exec logging is
 * enabled.
 */
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                 old, old & env->CP0_Cause & CP0Ca_IP_mask,
                 val, val & env->CP0_Cause & CP0Ca_IP_mask,
                 env->CP0_Cause);
        switch (cpu_mmu_index(env, false)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default:
            cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
            break;
        }
    }
}

/*
 * MTTC0 Status write for the targeted TC's VPE; certain bits
 * (0xf1000018) are excluded from the writable mask on this path.
 */
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}

/* Write CP0 IntCtl: only the VS field area (bits 9:5) is writable. */
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

/* Write CP0 SRSCtl: only the ESS and PSS fields are writable. */
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}

/* Write CP0 Cause; interrupt state is shared, take the iothread lock. */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    qemu_mutex_lock_iothread();
    cpu_mips_store_cause(env, arg1);
    qemu_mutex_unlock_iothread();
}

/*
 * MTTC0 Cause write for the targeted TC's VPE.
 * NOTE(review): unlike helper_mtc0_cause this path does not take the
 * iothread lock — confirm whether that is intentional.
 */
void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}
1779
/* MFTC0 read of the targeted TC's EPC. */
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}

/* MFTC0 read of the targeted TC's EBase. */
target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}

/*
 * Write CP0 EBase.  The exception base field (bits 29:12) is always
 * writable; if the implementation supports WG and the write sets it,
 * the upper bits become writable as well.
 */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}

/* MTTC0 EBase write for the targeted TC's VPE; same mask rules. */
void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}

/* MFTC0 read of the targeted TC's Config register selected by idx. */
target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}
1834
/* Write CP0 Config0: only the K0 cacheability field (bits 2:0) is writable. */
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/* Write CP0 Config2: no bits are writable here, arg1 is intentionally
   ignored. */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

/* Write CP0 Config3: only ISAOnExc, and only with the microMIPS ASE. */
void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}

/* Write CP0 Config4, limited to the per-CPU writable-bit mask. */
void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}

/* Write CP0 Config5 and recompute hflags, which depend on it. */
void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    compute_hflags(env);
}

/* Write CP0 LLAddr through the per-CPU shift and writable-bit mask. */
void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}
1873
/* Writable MAAR bits: valid bit (63), address field from PAMask, low flags. */
#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

/* Write the MAAR register selected by the current MAARI index. */
void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}

/* Write the high 32 bits of the selected MAAR, keeping the low word. */
void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}

/* Write CP0 MAARI (index into the MAAR array). */
void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /* Software may write all ones to INDEX to determine the
           maximum value supported. */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /* Other than the all ones, if the
       value written is not supported, then INDEX is unchanged
       from its previous value. */
}

/* Write WatchLo[sel]; the low 3 condition bits are forced clear. */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

/* Write WatchHi[sel]; the low 3 bits are write-one-to-clear. */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    env->CP0_WatchHi[sel] = arg1 & mask;
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

/* Write CP0 XContext; the low (BadVPN2/region) part is read-only. */
void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

/* Write CP0 Framemask (stored as-is, not otherwise modelled). */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
1928
1929 void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
1930 {
1931 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1932 if (arg1 & (1 << CP0DB_DM))
1933 env->hflags |= MIPS_HFLAG_DM;
1934 else
1935 env->hflags &= ~MIPS_HFLAG_DM;
1936 }
1937
1938 void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
1939 {
1940 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1941 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1942 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1943
1944 /* XXX: Might be wrong, check with EJTAG spec. */
1945 if (other_tc == other->current_tc)
1946 other->active_tc.CP0_Debug_tcstatus = val;
1947 else
1948 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1949 other->CP0_Debug = (other->CP0_Debug &
1950 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1951 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1952 }
1953
/* Write CP0 Performance0: only the low 11 control bits are kept. */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

/*
 * Write CP0 ErrCtl.  ITC is accepted only on CPUs with ITC tag
 * support; when ITC alone is set, CACHE instructions are redirected to
 * ITC via the MIPS_HFLAG_ITC_CACHE hflag.
 */
void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}

/* Write CP0 TagLo; fully writable when CACHE targets ITC tags. */
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /* If CACHE instruction is configured for ITC tags then make all
           CP0.TagLo bits writable. The actual write to ITC Configuration
           Tag will take care of the read-only bits. */
        env->CP0_TagLo = arg1;
    } else {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    }
}

/* Write CP0 DataLo (stored as-is, not otherwise modelled). */
void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

/* Write CP0 TagHi (stored as-is, not otherwise modelled). */
void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

/* Write CP0 DataHi (stored as-is, not otherwise modelled). */
void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
2000
2001 /* MIPS MT functions */
2002 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
2003 {
2004 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2005 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2006
2007 if (other_tc == other->current_tc)
2008 return other->active_tc.gpr[sel];
2009 else
2010 return other->tcs[other_tc].gpr[sel];
2011 }
2012
2013 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
2014 {
2015 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2016 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2017
2018 if (other_tc == other->current_tc)
2019 return other->active_tc.LO[sel];
2020 else
2021 return other->tcs[other_tc].LO[sel];
2022 }
2023
2024 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
2025 {
2026 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2027 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2028
2029 if (other_tc == other->current_tc)
2030 return other->active_tc.HI[sel];
2031 else
2032 return other->tcs[other_tc].HI[sel];
2033 }
2034
2035 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
2036 {
2037 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2038 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2039
2040 if (other_tc == other->current_tc)
2041 return other->active_tc.ACX[sel];
2042 else
2043 return other->tcs[other_tc].ACX[sel];
2044 }
2045
2046 target_ulong helper_mftdsp(CPUMIPSState *env)
2047 {
2048 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2049 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2050
2051 if (other_tc == other->current_tc)
2052 return other->active_tc.DSPControl;
2053 else
2054 return other->tcs[other_tc].DSPControl;
2055 }
2056
2057 void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2058 {
2059 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2060 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2061
2062 if (other_tc == other->current_tc)
2063 other->active_tc.gpr[sel] = arg1;
2064 else
2065 other->tcs[other_tc].gpr[sel] = arg1;
2066 }
2067
2068 void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2069 {
2070 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2071 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2072
2073 if (other_tc == other->current_tc)
2074 other->active_tc.LO[sel] = arg1;
2075 else
2076 other->tcs[other_tc].LO[sel] = arg1;
2077 }
2078
2079 void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2080 {
2081 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2082 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2083
2084 if (other_tc == other->current_tc)
2085 other->active_tc.HI[sel] = arg1;
2086 else
2087 other->tcs[other_tc].HI[sel] = arg1;
2088 }
2089
2090 void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2091 {
2092 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2093 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2094
2095 if (other_tc == other->current_tc)
2096 other->active_tc.ACX[sel] = arg1;
2097 else
2098 other->tcs[other_tc].ACX[sel] = arg1;
2099 }
2100
2101 void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
2102 {
2103 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2104 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2105
2106 if (other_tc == other->current_tc)
2107 other->active_tc.DSPControl = arg1;
2108 else
2109 other->tcs[other_tc].DSPControl = arg1;
2110 }
2111
2112 /* MIPS MT functions */
/* DMT (disable multi-threading): not implemented, returns 0. */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}

/* EMT (enable multi-threading): not implemented, returns 0. */
target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}

/*
 * DVPE: disable all other VPEs and put them to sleep; returns the
 * previous MVPControl value.
 */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe. */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}

/*
 * EVPE: re-enable and wake all other VPEs (except those sleeping in
 * WFI); returns the previous MVPControl value.
 */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
    }
    return prev;
}
2159 #endif /* !CONFIG_USER_ONLY */
2160
/* FORK: thread creation is not implemented; arguments are ignored. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /* arg1 = rt, arg2 = rs */
    /* TODO: store to TC register */
}

/*
 * YIELD.  Negative qualifiers (other than -2) raise a thread exception
 * when yield scheduler intercept is enabled and the TC may be
 * descheduled; positive qualifiers always raise it (qualifier inputs
 * are not modelled).  Returns YQMask.
 */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        } else {
            /* TODO: Deallocate TC */
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}
2196
2197 /* R6 Multi-threading */
2198 #ifndef CONFIG_USER_ONLY
/*
 * DVP (R6 multi-threading): disable virtual processors.
 *
 * If VPControl.DIS is not already set, put every vCPU except the one
 * executing the instruction to sleep and set VPControl.DIS.
 * Returns the previous value of the VPControl register.
 */
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp. */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}
2216
/*
 * EVP (R6 multi-threading): re-enable virtual processors.
 *
 * If VPControl.DIS is set, wake every other vCPU that is not sleeping
 * in WFI, then clear VPControl.DIS.
 * Returns the previous value of the VPControl register.
 */
target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /* If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up. */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}
2235 #endif /* !CONFIG_USER_ONLY */
2236
2237 #ifndef CONFIG_USER_ONLY
2238 /* TLB management */
2239 static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
2240 {
2241 /* Discard entries from env->tlb[first] onwards. */
2242 while (env->tlb->tlb_in_use > first) {
2243 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
2244 }
2245 }
2246
/*
 * Extract the physical frame number from an EntryLo register value.
 * On MIPS64 the PFN is the field starting at bit 6.  On 32-bit targets
 * the 24-bit PFN (bits 29..6) is combined with the PFNX field held in
 * the upper 32 bits, placed above it.
 */
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) |          /* PFN */
           (extract64(entrylo, 32, 32) << 24);  /* PFNX */
#endif
}
2256
/*
 * Fill TLB entry 'idx' from the CP0 EntryHi / EntryLo0 / EntryLo1 /
 * PageMask registers.  If EntryHi.EHINV is set the entry is simply
 * marked invalid.  PFNs are stored pre-shifted (<< 12) with the
 * PageMask-covered bits cleared.
 */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    /* VPN2: the even/odd page-select bit is excluded by the wider mask. */
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->PageMask = env->CP0_PageMask;
    /* The entry is global only when both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
2289
2290 void r4k_helper_tlbinv(CPUMIPSState *env)
2291 {
2292 int idx;
2293 r4k_tlb_t *tlb;
2294 uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2295
2296 for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
2297 tlb = &env->tlb->mmu.r4k.tlb[idx];
2298 if (!tlb->G && tlb->ASID == ASID) {
2299 tlb->EHINV = 1;
2300 }
2301 }
2302 cpu_mips_tlb_flush(env);
2303 }
2304
2305 void r4k_helper_tlbinvf(CPUMIPSState *env)
2306 {
2307 int idx;
2308
2309 for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
2310 env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
2311 }
2312 cpu_mips_tlb_flush(env);
2313 }
2314
2315 void r4k_helper_tlbwi(CPUMIPSState *env)
2316 {
2317 r4k_tlb_t *tlb;
2318 int idx;
2319 target_ulong VPN;
2320 uint16_t ASID;
2321 bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
2322
2323 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2324 tlb = &env->tlb->mmu.r4k.tlb[idx];
2325 VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
2326 #if defined(TARGET_MIPS64)
2327 VPN &= env->SEGMask;
2328 #endif
2329 ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2330 EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
2331 G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
2332 V0 = (env->CP0_EntryLo0 & 2) != 0;
2333 D0 = (env->CP0_EntryLo0 & 4) != 0;
2334 XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1;
2335 RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1;
2336 V1 = (env->CP0_EntryLo1 & 2) != 0;
2337 D1 = (env->CP0_EntryLo1 & 4) != 0;
2338 XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1;
2339 RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1;
2340
2341 /* Discard cached TLB entries, unless tlbwi is just upgrading access
2342 permissions on the current entry. */
2343 if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
2344 (!tlb->EHINV && EHINV) ||
2345 (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
2346 (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
2347 (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
2348 (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
2349 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2350 }
2351
2352 r4k_invalidate_tlb(env, idx, 0);
2353 r4k_fill_tlb(env, idx);
2354 }
2355
2356 void r4k_helper_tlbwr(CPUMIPSState *env)
2357 {
2358 int r = cpu_mips_get_random(env);
2359
2360 r4k_invalidate_tlb(env, r, 1);
2361 r4k_fill_tlb(env, r);
2362 }
2363
/*
 * TLBP: probe the TLB for an entry matching EntryHi (VPN + ASID,
 * honouring each entry's PageMask and G bit).
 * On a hit, CP0_Index is set to the matching index.  On a miss, the
 * probe-failure bit (Index bit 31) is set, and any matching shadow
 * entry beyond nb_tlb is discarded so QEMU's cached view cannot keep
 * a translation the architectural TLB no longer reports.
 */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID;
    int i;

    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
2411
/*
 * Pack a TLB PFN back into the EntryLo register layout (the inverse of
 * get_tlb_pfn_from_entrylo): the PFN goes to bit 6 upwards, and on
 * 32-bit targets the high part goes to the PFNX field at bit 32.
 */
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) |   /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32);  /* PFNX */
#endif
}
2421
2422 void r4k_helper_tlbr(CPUMIPSState *env)
2423 {
2424 r4k_tlb_t *tlb;
2425 uint16_t ASID;
2426 int idx;
2427
2428 ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2429 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2430 tlb = &env->tlb->mmu.r4k.tlb[idx];
2431
2432 /* If this will change the current ASID, flush qemu's TLB. */
2433 if (ASID != tlb->ASID)
2434 cpu_mips_tlb_flush(env);
2435
2436 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2437
2438 if (tlb->EHINV) {
2439 env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
2440 env->CP0_PageMask = 0;
2441 env->CP0_EntryLo0 = 0;
2442 env->CP0_EntryLo1 = 0;
2443 } else {
2444 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
2445 env->CP0_PageMask = tlb->PageMask;
2446 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
2447 ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
2448 ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
2449 get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
2450 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
2451 ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
2452 ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
2453 get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
2454 }
2455 }
2456
/* Generic TLB-instruction helpers: forward to the MMU-model specific
 * implementation installed in env->tlb (e.g. the r4k_* functions above). */

void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}
2486
2487 /* Specials */
2488 target_ulong helper_di(CPUMIPSState *env)
2489 {
2490 target_ulong t0 = env->CP0_Status;
2491
2492 env->CP0_Status = t0 & ~(1 << CP0St_IE);
2493 return t0;
2494 }
2495
2496 target_ulong helper_ei(CPUMIPSState *env)
2497 {
2498 target_ulong t0 = env->CP0_Status;
2499
2500 env->CP0_Status = t0 | (1 << CP0St_IE);
2501 return t0;
2502 }
2503
2504 static void debug_pre_eret(CPUMIPSState *env)
2505 {
2506 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2507 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2508 env->active_tc.PC, env->CP0_EPC);
2509 if (env->CP0_Status & (1 << CP0St_ERL))
2510 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2511 if (env->hflags & MIPS_HFLAG_DM)
2512 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2513 qemu_log("\n");
2514 }
2515 }
2516
2517 static void debug_post_eret(CPUMIPSState *env)
2518 {
2519 MIPSCPU *cpu = mips_env_get_cpu(env);
2520
2521 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2522 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2523 env->active_tc.PC, env->CP0_EPC);
2524 if (env->CP0_Status & (1 << CP0St_ERL))
2525 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2526 if (env->hflags & MIPS_HFLAG_DM)
2527 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2528 switch (cpu_mmu_index(env, false)) {
2529 case 3:
2530 qemu_log(", ERL\n");
2531 break;
2532 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2533 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2534 case MIPS_HFLAG_KM: qemu_log("\n"); break;
2535 default:
2536 cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
2537 break;
2538 }
2539 }
2540 }
2541
2542 static void set_pc(CPUMIPSState *env, target_ulong error_pc)
2543 {
2544 env->active_tc.PC = error_pc & ~(target_ulong)1;
2545 if (error_pc & 1) {
2546 env->hflags |= MIPS_HFLAG_M16;
2547 } else {
2548 env->hflags &= ~(MIPS_HFLAG_M16);
2549 }
2550 }
2551
/*
 * Common ERET/ERETNC tail: return from error level (ErrorEPC, clearing
 * Status.ERL) or, failing that, from exception level (EPC, clearing
 * Status.EXL).  ERL takes priority over EXL.  hflags are recomputed
 * after the Status update so subsequent translation sees the new mode.
 */
static inline void exception_return(CPUMIPSState *env)
{
    debug_pre_eret(env);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env, env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env, env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret(env);
}
2565
/*
 * ERET: return from exception and clear the LL bit by setting lladdr
 * to 1 (an address no aligned LL can produce, so a pending SC fails).
 * NOTE(review): the "1" sentinel convention should match the SC
 * helper's comparison -- confirm there.
 */
void helper_eret(CPUMIPSState *env)
{
    exception_return(env);
    env->lladdr = 1;
}
2571
/* ERETNC: like ERET but "no clear" -- lladdr (the LL bit) is left
 * untouched so an LL/SC sequence can survive the return. */
void helper_eretnc(CPUMIPSState *env)
{
    exception_return(env);
}
2576
/*
 * DERET: return from debug mode to the address held in CP0_DEPC.
 * Debug Mode is left (and hflags recomputed) before set_pc() so the
 * PC update is performed in the post-debug execution mode.
 */
void helper_deret(CPUMIPSState *env)
{
    debug_pre_eret(env);

    env->hflags &= ~MIPS_HFLAG_DM;
    compute_hflags(env);

    set_pc(env, env->CP0_DEPC);

    debug_post_eret(env);
}
2588 #endif /* !CONFIG_USER_ONLY */
2589
2590 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
2591 {
2592 if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
2593 return;
2594 }
2595 do_raise_exception(env, EXCP_RI, pc);
2596 }
2597
/* RDHWR helpers: each one first verifies access via check_hwrena()
 * (CP0 usable, or the matching HWREna bit set), raising RI otherwise. */

/* HWR 0: CPU number, taken from the low bits of EBase. */
target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    check_hwrena(env, 0, GETPC());
    return env->CP0_EBase & 0x3ff;
}

/* HWR 1: address step required between SYNCI operations. */
target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    check_hwrena(env, 1, GETPC());
    return env->SYNCI_Step;
}

/* HWR 2: cycle counter.  In system mode the value comes from the
 * emulated timer, which must be read under the iothread lock. */
target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    int32_t count;
    check_hwrena(env, 2, GETPC());
#ifdef CONFIG_USER_ONLY
    count = env->CP0_Count;
#else
    qemu_mutex_lock_iothread();
    count = (int32_t)cpu_mips_get_count(env);
    qemu_mutex_unlock_iothread();
#endif
    return count;
}

/* HWR 3: cycle counter resolution. */
target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    check_hwrena(env, 3, GETPC());
    return env->CCRes;
}

/* HWR 4: performance counter 0. */
target_ulong helper_rdhwr_performance(CPUMIPSState *env)
{
    check_hwrena(env, 4, GETPC());
    return env->CP0_Performance0;
}

/* HWR 5: the Config5.XNP bit. */
target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
{
    check_hwrena(env, 5, GETPC());
    return (env->CP0_Config5 >> CP0C5_XNP) & 1;
}
2641
2642 void helper_pmon(CPUMIPSState *env, int function)
2643 {
2644 function /= 2;
2645 switch (function) {
2646 case 2: /* TODO: char inbyte(int waitflag); */
2647 if (env->active_tc.gpr[4] == 0)
2648 env->active_tc.gpr[2] = -1;
2649 /* Fall through */
2650 case 11: /* TODO: char inbyte (void); */
2651 env->active_tc.gpr[2] = -1;
2652 break;
2653 case 3:
2654 case 12:
2655 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2656 break;
2657 case 17:
2658 break;
2659 case 158:
2660 {
2661 unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
2662 printf("%s", fmt);
2663 }
2664 break;
2665 }
2666 }
2667
/*
 * WAIT: halt the vCPU until the next interrupt.  EXCP_HLT is raised to
 * leave the execution loop; since WAIT always ends its translation
 * block, PC is already up to date and no unwinding is required.
 */
void helper_wait(CPUMIPSState *env)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
    /* Last instruction in the block, PC was updated before
       - no need to recover PC and icount */
    raise_exception(env, EXCP_HLT);
}
2678
2679 #if !defined(CONFIG_USER_ONLY)
2680
/*
 * Unaligned-access hook: deliver a MIPS address error exception.
 * The faulting address is latched into CP0_BadVAddr (unless in debug
 * mode); stores raise AdES, loads/fetches raise AdEL, and instruction
 * fetches additionally carry EXCP_INST_NOTAVAIL in the error code.
 */
void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int error_code = 0;
    int excp;

    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = addr;
    }

    if (access_type == MMU_DATA_STORE) {
        excp = EXCP_AdES;
    } else {
        excp = EXCP_AdEL;
        if (access_type == MMU_INST_FETCH) {
            error_code |= EXCP_INST_NOTAVAIL;
        }
    }

    do_raise_exception_err(env, excp, error_code, retaddr);
}
2705
/*
 * Softmmu refill hook, called on a QEMU TLB miss.  Attempts the guest
 * translation; on failure, delivers the exception that
 * mips_cpu_handle_mmu_fault() staged in cs->exception_index and
 * env->error_code, unwinding to 'retaddr'.
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (ret) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        do_raise_exception_err(env, cs->exception_index,
                               env->error_code, retaddr);
    }
}
2720
/*
 * Bus-error hook for accesses to unassigned physical addresses:
 * raises IBE for instruction fetches, DBE for data accesses.
 * No-op under KVM (see comment below).
 */
void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    /*
     * Raising an exception with KVM enabled will crash because it won't be from
     * the main execution loop so the longjmp won't have a matching setjmp.
     * Until we can trigger a bus error exception through KVM lets just ignore
     * the access.
     */
    if (kvm_enabled()) {
        return;
    }

    if (is_exec) {
        raise_exception(env, EXCP_IBE);
    } else {
        raise_exception(env, EXCP_DBE);
    }
}
2744 #endif /* !CONFIG_USER_ONLY */
2745
/* Complex FPU operations which may need stack space. */

/* The constant 2.0 as a raw IEEE single/double bit pattern. */
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)

/* Saturation values returned by float->int conversions on
 * invalid/overflow (legacy, non-nan2008 behaviour). */
#define FP_TO_INT32_OVERFLOW 0x7fffffff
#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
2761
2762 target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
2763 {
2764 target_ulong arg1 = 0;
2765
2766 switch (reg) {
2767 case 0:
2768 arg1 = (int32_t)env->active_fpu.fcr0;
2769 break;
2770 case 1:
2771 /* UFR Support - Read Status FR */
2772 if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) {
2773 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2774 arg1 = (int32_t)
2775 ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR);
2776 } else {
2777 do_raise_exception(env, EXCP_RI, GETPC());
2778 }
2779 }
2780 break;
2781 case 5:
2782 /* FRE Support - read Config5.FRE bit */
2783 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
2784 if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
2785 arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1;
2786 } else {
2787 helper_raise_exception(env, EXCP_RI);
2788 }
2789 }
2790 break;
2791 case 25:
2792 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2793 break;
2794 case 26:
2795 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2796 break;
2797 case 28:
2798 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2799 break;
2800 default:
2801 arg1 = (int32_t)env->active_fpu.fcr31;
2802 break;
2803 }
2804
2805 return arg1;
2806 }
2807
2808 void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
2809 {
2810 switch (fs) {
2811 case 1:
2812 /* UFR Alias - Reset Status FR */
2813 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
2814 return;
2815 }
2816 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2817 env->CP0_Status &= ~(1 << CP0St_FR);
2818 compute_hflags(env);
2819 } else {
2820 do_raise_exception(env, EXCP_RI, GETPC());
2821 }
2822 break;
2823 case 4:
2824 /* UNFR Alias - Set Status FR */
2825 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
2826 return;
2827 }
2828 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2829 env->CP0_Status |= (1 << CP0St_FR);
2830 compute_hflags(env);
2831 } else {
2832 do_raise_exception(env, EXCP_RI, GETPC());
2833 }
2834 break;
2835 case 5:
2836 /* FRE Support - clear Config5.FRE bit */
2837 if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
2838 return;
2839 }
2840 if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
2841 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
2842 compute_hflags(env);
2843 } else {
2844 helper_raise_exception(env, EXCP_RI);
2845 }
2846 break;
2847 case 6:
2848 /* FRE Support - set Config5.FRE bit */
2849 if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
2850 return;
2851 }
2852 if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
2853 env->CP0_Config5 |= (1 << CP0C5_FRE);
2854 compute_hflags(env);
2855 } else {
2856 helper_raise_exception(env, EXCP_RI);
2857 }
2858 break;
2859 case 25:
2860 if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) {
2861 return;
2862 }
2863 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2864 ((arg1 & 0x1) << 23);
2865 break;
2866 case 26:
2867 if (arg1 & 0x007c0000)
2868 return;
2869 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2870 break;
2871 case 28:
2872 if (arg1 & 0x007c0000)
2873 return;
2874 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2875 ((arg1 & 0x4) << 22);
2876 break;
2877 case 31:
2878 env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
2879 (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
2880 break;
2881 default:
2882 if (env->insn_flags & ISA_MIPS32R6) {
2883 do_raise_exception(env, EXCP_RI, GETPC());
2884 }
2885 return;
2886 }
2887 restore_fp_status(env);
2888 set_float_exception_flags(0, &env->active_fpu.fp_status);
2889 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2890 do_raise_exception(env, EXCP_FPE, GETPC());
2891 }
2892
2893 int ieee_ex_to_mips(int xcpt)
2894 {
2895 int ret = 0;
2896 if (xcpt) {
2897 if (xcpt & float_flag_invalid) {
2898 ret |= FP_INVALID;
2899 }
2900 if (xcpt & float_flag_overflow) {
2901 ret |= FP_OVERFLOW;
2902 }
2903 if (xcpt & float_flag_underflow) {
2904 ret |= FP_UNDERFLOW;
2905 }
2906 if (xcpt & float_flag_divbyzero) {
2907 ret |= FP_DIV0;
2908 }
2909 if (xcpt & float_flag_inexact) {
2910 ret |= FP_INEXACT;
2911 }
2912 }
2913 return ret;
2914 }
2915
/*
 * Fold the softfloat exception flags accumulated since the last call
 * into the FCR31 cause field.  If any raised exception is enabled in
 * FCR31, deliver an FP exception unwinding to 'pc'; otherwise
 * accumulate the bits into the sticky flags field.
 */
static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);

    if (tmp) {
        /* Clear the accumulator before possibly raising, so the next
         * FP op starts from a clean slate. */
        set_float_exception_flags(0, &env->active_fpu.fp_status);

        if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
            do_raise_exception(env, EXCP_FPE, pc);
        } else {
            UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
        }
    }
}
2932
2933 /* Float support.
   Single precision routines have an "s" suffix, double precision a
2935 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2936 paired single lower "pl", paired single upper "pu". */
2937
/* unary operations, modifying fp status */

/* SQRT.D: double-precision square root. */
uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt0;
}

/* SQRT.S: single-precision square root. */
uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst0;
}

/* CVT.D.S: single to double. */
uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t fdt2;

    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.D.W: 32-bit integer to double. */
uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
{
    uint64_t fdt2;

    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.D.L: 64-bit integer to double. */
uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
{
    uint64_t fdt2;

    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}
2979
/* CVT.L.D: double to 64-bit integer in the current rounding mode,
 * saturating to FP_TO_INT64_OVERFLOW on invalid/overflow (legacy,
 * non-nan2008 behaviour; contrast the *_2008 variants below). */
uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

/* CVT.L.S: single to 64-bit integer, same saturation rule. */
uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}
3005
/* CVT.PS.PW: convert a pair of 32-bit integers (packed in dt0) to a
 * paired-single value; low word -> low single, high word -> high. */
uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* CVT.PW.PS: convert a paired-single to a pair of 32-bit integers.
 * Each half is converted with a clean flag set so its own
 * invalid/overflow saturation can be detected; the flags of both
 * halves are then merged before updating FCR31. */
uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;
    int excp, excph;

    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    excp = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excp & (float_flag_overflow | float_flag_invalid)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    excph = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excph & (float_flag_overflow | float_flag_invalid)) {
        wth2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());

    return ((uint64_t)wth2 << 32) | wt2;
}
3041
/* CVT.S.D: double to single. */
uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t fst2;

    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.W: 32-bit integer to single. */
uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t fst2;

    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.L: 64-bit integer to single. */
uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;

    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.PL: move the lower paired-single half (bit-copy, but still
 * runs the FCR31 update like every FP op). */
uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t wt2;

    wt2 = wt0;
    update_fcr31(env, GETPC());
    return wt2;
}

/* CVT.S.PU: move the upper paired-single half (bit-copy). */
uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
{
    uint32_t wt2;

    wt2 = wth0;
    update_fcr31(env, GETPC());
    return wt2;
}
3086
/* CVT.W.S: single to 32-bit integer in the current rounding mode,
 * saturating to FP_TO_INT32_OVERFLOW on invalid/overflow. */
uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

/* CVT.W.D: double to 32-bit integer, same saturation rule. */
uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3112
/* ROUND.{L,W}.{D,S}: convert to integer with round-to-nearest-even,
 * temporarily overriding the FCR31 rounding mode (restored afterwards)
 * and saturating on invalid/overflow. */

uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3172
/* TRUNC.{L,W}.{D,S}: convert to integer with round-toward-zero (using
 * the dedicated softfloat entry points, so no rounding-mode switch is
 * needed), saturating on invalid/overflow. */

uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3224
/* CEIL.{L,W}.{D,S}: convert to integer with round-up (toward +inf),
 * temporarily overriding the FCR31 rounding mode (restored afterwards)
 * and saturating on invalid/overflow. */

uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3284
/* FLOOR.{L,W}.{D,S}: convert to integer with round-down (toward -inf),
 * temporarily overriding the FCR31 rounding mode (restored afterwards)
 * and saturating on invalid/overflow. */

uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3344
/*
 * IEEE 754-2008 (FCSR.NAN2008 == 1) CVT.{L,W}.{S,D} conversions using
 * the current FCSR rounding mode.  2008 semantics: a NaN input converts
 * to 0 instead of the legacy FP_TO_INT*_OVERFLOW saturation value.
 * Non-NaN out-of-range inputs keep whatever softfloat returned
 * (softfloat saturates on overflow).
 */
uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3404
/*
 * 2008-mode ROUND.{L,W}.{S,D}: convert with rounding forced to
 * nearest-even (restored from FCSR afterwards).  As with the other
 * *_2008 helpers, a NaN input yields 0.
 */
uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3476
/*
 * 2008-mode TRUNC.{L,W}.{S,D}: convert with round-to-zero via the
 * dedicated *_round_to_zero softfloat entry points (no mode save/restore
 * needed).  A NaN input yields 0.
 */
uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3536
/*
 * 2008-mode CEIL.{L,W}.{S,D}: convert with rounding forced toward
 * +infinity (restored from FCSR afterwards).  A NaN input yields 0.
 */
uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3604
/*
 * 2008-mode FLOOR.{L,W}.{S,D}: convert with rounding forced toward
 * -infinity (restored from FCSR afterwards).  A NaN input yields 0.
 */
uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}
3672
/*
 * Unary operations (ABS, NEG/CHS) that only manipulate the sign bit of
 * the raw encoding; they never touch fp_status and so raise no IEEE
 * exceptions.  The _ps variant applies the operation independently to
 * both 32-bit halves of a paired-single value.
 */
#define FLOAT_UNOP(name)                                           \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
{                                                                  \
    return float64_ ## name(fdt0);                                 \
}                                                                  \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
{                                                                  \
    return float32_ ## name(fst0);                                 \
}                                                                  \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                                  \
    uint32_t wt0;                                                  \
    uint32_t wth0;                                                 \
                                                                   \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                     \
    wth0 = float32_ ## name(fdt0 >> 32);                           \
    return ((uint64_t)wth0 << 32) | wt0;                           \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
3695
/* MIPS specific unary operations */
/*
 * RECIP.fmt: full-precision reciprocal, computed as 1/x.
 * RSQRT.fmt: full-precision reciprocal square root, computed as
 * 1/sqrt(x) in two rounded softfloat steps (sqrt then divide).
 */
uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}
3734
/*
 * RECIP1.fmt: reciprocal "step 1".  Implemented here at full precision
 * (identical computation to RECIP.fmt above); real hardware may return a
 * reduced-precision seed value -- NOTE(review): full precision is an
 * acceptable over-approximation of the seed.
 */
uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}
3752
3753 uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
3754 {
3755 uint32_t fst2;
3756 uint32_t fsth2;
3757
3758 fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
3759 fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
3760 update_fcr31(env, GETPC());
3761 return ((uint64_t)fsth2 << 32) | fst2;
3762 }
3763
/*
 * RSQRT1.fmt: reciprocal-square-root "step 1".  Implemented at full
 * precision (same computation as RSQRT.fmt above): sqrt followed by a
 * divide, each individually rounded.
 */
uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}
3783
3784 uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
3785 {
3786 uint32_t fst2;
3787 uint32_t fsth2;
3788
3789 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
3790 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
3791 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
3792 fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
3793 update_fcr31(env, GETPC());
3794 return ((uint64_t)fsth2 << 32) | fst2;
3795 }
3796
/*
 * RINT.fmt (R6): round a float to an integral value *in floating-point
 * format*, using the current FCSR rounding mode.
 */
#define FLOAT_RINT(name, bits)                                              \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,                \
                                          uint ## bits ## _t fs)            \
{                                                                           \
    uint ## bits ## _t fdret;                                               \
                                                                            \
    fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \
    update_fcr31(env, GETPC());                                             \
    return fdret;                                                           \
}

FLOAT_RINT(rint_s, 32)
FLOAT_RINT(rint_d, 64)
#undef FLOAT_RINT
3811
/*
 * CLASS.fmt (R6) result bits: exactly one bit is set per result,
 * identifying the category of the input operand.
 */
#define FLOAT_CLASS_SIGNALING_NAN      0x001
#define FLOAT_CLASS_QUIET_NAN          0x002
#define FLOAT_CLASS_NEGATIVE_INFINITY  0x004
#define FLOAT_CLASS_NEGATIVE_NORMAL    0x008
#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
#define FLOAT_CLASS_NEGATIVE_ZERO      0x020
#define FLOAT_CLASS_POSITIVE_INFINITY  0x040
#define FLOAT_CLASS_POSITIVE_NORMAL    0x080
#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
#define FLOAT_CLASS_POSITIVE_ZERO      0x200

/*
 * Defines float_class_{s,d}() (non-static, callable from other helper
 * code) plus the helper_float_class_{s,d} wrappers.  The zero test must
 * precede the zero_or_denormal test: a value that is "zero or denormal"
 * but not zero is a subnormal.
 */
#define FLOAT_CLASS(name, bits)                                      \
uint ## bits ## _t float_ ## name (uint ## bits ## _t arg,           \
                                   float_status *status)             \
{                                                                    \
    if (float ## bits ## _is_signaling_nan(arg, status)) {           \
        return FLOAT_CLASS_SIGNALING_NAN;                            \
    } else if (float ## bits ## _is_quiet_nan(arg, status)) {        \
        return FLOAT_CLASS_QUIET_NAN;                                \
    } else if (float ## bits ## _is_neg(arg)) {                      \
        if (float ## bits ## _is_infinity(arg)) {                    \
            return FLOAT_CLASS_NEGATIVE_INFINITY;                    \
        } else if (float ## bits ## _is_zero(arg)) {                 \
            return FLOAT_CLASS_NEGATIVE_ZERO;                        \
        } else if (float ## bits ## _is_zero_or_denormal(arg)) {     \
            return FLOAT_CLASS_NEGATIVE_SUBNORMAL;                   \
        } else {                                                     \
            return FLOAT_CLASS_NEGATIVE_NORMAL;                      \
        }                                                            \
    } else {                                                         \
        if (float ## bits ## _is_infinity(arg)) {                    \
            return FLOAT_CLASS_POSITIVE_INFINITY;                    \
        } else if (float ## bits ## _is_zero(arg)) {                 \
            return FLOAT_CLASS_POSITIVE_ZERO;                        \
        } else if (float ## bits ## _is_zero_or_denormal(arg)) {     \
            return FLOAT_CLASS_POSITIVE_SUBNORMAL;                   \
        } else {                                                     \
            return FLOAT_CLASS_POSITIVE_NORMAL;                      \
        }                                                            \
    }                                                                \
}                                                                    \
                                                                     \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,         \
                                          uint ## bits ## _t arg)    \
{                                                                    \
    return float_ ## name(arg, &env->active_fpu.fp_status);         \
}

FLOAT_CLASS(class_s, 32)
FLOAT_CLASS(class_d, 64)
#undef FLOAT_CLASS
3863
/* binary operations */
/*
 * FLOAT_BINOP(name) instantiates the ADD/SUB/MUL/DIV helpers for the
 * double (_d), single (_s) and paired-single (_ps) formats.  The _ps
 * variant operates on each 32-bit half independently (low half first),
 * sharing one fp_status so exception flags accumulate across both.
 */
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,            \
                                     uint64_t fdt0, uint64_t fdt1) \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31(env, GETPC());                                    \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,            \
                                     uint32_t fst0, uint32_t fst1) \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31(env, GETPC());                                    \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,           \
                                      uint64_t fdt0,               \
                                      uint64_t fdt1)               \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31(env, GETPC());                                    \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
3908
/* MIPS specific binary operations */
/*
 * RECIP2.fmt: second step of the Newton-Raphson reciprocal refinement;
 * computes -(fs * ft - 1).  NOTE(review): matches the MIPS-3D RECIP2
 * definition -- confirm against the ASE manual.  The _ps variant does
 * this for each 32-bit half (all multiplies before all subtracts).
 */
uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
{
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}
3940
/*
 * RSQRT2.fmt: second step of the Newton-Raphson reciprocal-square-root
 * refinement; computes -((fs * ft - 1) / 2).  NOTE(review): matches the
 * MIPS-3D RSQRT2 definition -- confirm against the ASE manual.  The _ps
 * variant does this for each 32-bit half, stage by stage.
 */
uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
{
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}
3975
3976 uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3977 {
3978 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3979 uint32_t fsth0 = fdt0 >> 32;
3980 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3981 uint32_t fsth1 = fdt1 >> 32;
3982 uint32_t fst2;
3983 uint32_t fsth2;
3984
3985 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3986 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3987 update_fcr31(env, GETPC());
3988 return ((uint64_t)fsth2 << 32) | fst2;
3989 }
3990
3991 uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3992 {
3993 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3994 uint32_t fsth0 = fdt0 >> 32;
3995 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3996 uint32_t fsth1 = fdt1 >> 32;
3997 uint32_t fst2;
3998 uint32_t fsth2;
3999
4000 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
4001 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
4002 update_fcr31(env, GETPC());
4003 return ((uint64_t)fsth2 << 32) | fst2;
4004 }
4005
/*
 * MIN/MAX/MINA/MAXA (R6), delegated to the softfloat minnum/maxnum
 * family (IEEE 754-2008 minNum/maxNum semantics; the *mag variants
 * compare by magnitude).
 */
#define FLOAT_MINMAX(name, bits, minmaxfunc)                         \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,         \
                                          uint ## bits ## _t fs,     \
                                          uint ## bits ## _t ft)     \
{                                                                    \
    uint ## bits ## _t fdret;                                        \
                                                                     \
    fdret = float ## bits ## _ ## minmaxfunc(fs, ft,                 \
                                       &env->active_fpu.fp_status);  \
    update_fcr31(env, GETPC());                                      \
    return fdret;                                                    \
}

FLOAT_MINMAX(max_s, 32, maxnum)
FLOAT_MINMAX(max_d, 64, maxnum)
FLOAT_MINMAX(maxa_s, 32, maxnummag)
FLOAT_MINMAX(maxa_d, 64, maxnummag)

FLOAT_MINMAX(min_s, 32, minnum)
FLOAT_MINMAX(min_d, 64, minnum)
FLOAT_MINMAX(mina_s, 32, minnummag)
FLOAT_MINMAX(mina_d, 64, minnummag)
#undef FLOAT_MINMAX
4029
/* ternary operations */
/*
 * Expand an *unfused* multiply-add: a = +/-(a * b +/- c), controlled by
 * the float_muladd_negate_* bits in 'flags'.  Deliberately not fused:
 * the intermediate product is rounded before the add/sub (double
 * rounding), which is what the pre-R6 MADD/MSUB/NMADD/NMSUB family
 * requires -- NOTE(review): confirm against the architecture manual.
 * This is a statement macro: 'a' must be an lvalue and is updated in
 * place; 'env' must be in scope at the expansion site.
 */
#define UNFUSED_FMA(prefix, a, b, c, flags)                          \
{                                                                    \
    a = prefix##_mul(a, b, &env->active_fpu.fp_status);              \
    if ((flags) & float_muladd_negate_c) {                           \
        a = prefix##_sub(a, c, &env->active_fpu.fp_status);          \
    } else {                                                         \
        a = prefix##_add(a, c, &env->active_fpu.fp_status);          \
    }                                                                \
    if ((flags) & float_muladd_negate_result) {                      \
        a = prefix##_chs(a);                                         \
    }                                                                \
}
4043
/* FMA based operations */
/*
 * Instantiates the unfused MADD/MSUB/NMADD/NMSUB helpers for double,
 * single and paired-single formats on top of UNFUSED_FMA (see above:
 * the product is rounded before the add/sub).  'type' is the
 * combination of float_muladd_negate_* flags for the instruction.
 */
#define FLOAT_FMA(name, type)                                        \
uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,              \
                                     uint64_t fdt0, uint64_t fdt1,   \
                                     uint64_t fdt2)                  \
{                                                                    \
    UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type);                    \
    update_fcr31(env, GETPC());                                      \
    return fdt0;                                                     \
}                                                                    \
                                                                     \
uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,              \
                                     uint32_t fst0, uint32_t fst1,   \
                                     uint32_t fst2)                  \
{                                                                    \
    UNFUSED_FMA(float32, fst0, fst1, fst2, type);                    \
    update_fcr31(env, GETPC());                                      \
    return fst0;                                                     \
}                                                                    \
                                                                     \
uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,             \
                                      uint64_t fdt0, uint64_t fdt1,  \
                                      uint64_t fdt2)                 \
{                                                                    \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                               \
    uint32_t fsth0 = fdt0 >> 32;                                     \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                               \
    uint32_t fsth1 = fdt1 >> 32;                                     \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                               \
    uint32_t fsth2 = fdt2 >> 32;                                     \
                                                                     \
    UNFUSED_FMA(float32, fst0, fst1, fst2, type);                    \
    UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type);                 \
    update_fcr31(env, GETPC());                                      \
    return ((uint64_t)fsth0 << 32) | fst0;                           \
}
FLOAT_FMA(madd, 0)
FLOAT_FMA(msub, float_muladd_negate_c)
FLOAT_FMA(nmadd, float_muladd_negate_result)
FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
#undef FLOAT_FMA
4085
/*
 * R6 MADDF/MSUBF: *fused* multiply-add via softfloat's float*_muladd
 * (single rounding, unlike the UNFUSED_FMA helpers above).
 * MSUBF passes float_muladd_negate_product, i.e. fd - fs * ft.
 */
#define FLOAT_FMADDSUB(name, bits, muladd_arg)                       \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,         \
                                          uint ## bits ## _t fs,     \
                                          uint ## bits ## _t ft,     \
                                          uint ## bits ## _t fd)     \
{                                                                    \
    uint ## bits ## _t fdret;                                        \
                                                                     \
    fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg,         \
                                     &env->active_fpu.fp_status);    \
    update_fcr31(env, GETPC());                                      \
    return fdret;                                                    \
}

FLOAT_FMADDSUB(maddf_s, 32, 0)
FLOAT_FMADDSUB(maddf_d, 64, 0)
FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product)
FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product)
#undef FLOAT_FMADDSUB
4105
/* compare operations */
/*
 * FOP_COND_D(op, cond) instantiates the pre-R6 double compare helpers:
 *   helper_cmp_d_<op>    - compare fdt0 with fdt1 (C.cond.D)
 *   helper_cmpabs_d_<op> - compare |fdt0| with |fdt1| (absolute-value
 *                          compare variant)
 * The boolean result of 'cond' is written into condition-code bit 'cc'
 * of FCSR.  The "quiet" predicates signal Invalid only on signaling
 * NaNs; the non-quiet variants (sf..ngt below) signal on any NaN.
 */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
                         uint64_t fdt1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
                            uint64_t fdt1, int cc)             \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
4153
/*
 * Single-precision analogue of FOP_COND_D above: instantiates
 * helper_cmp_s_<op> (C.cond.S) and helper_cmpabs_s_<op> (absolute-value
 * compare), writing the result into FCSR condition-code bit 'cc'.
 */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0,     \
                         uint32_t fst1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0,  \
                            uint32_t fst1, int cc)             \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
4200
4201 #define FOP_COND_PS(op, condl, condh) \
4202 void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4203 uint64_t fdt1, int cc) \
4204 { \
4205 uint32_t fst0, fsth0, fst1, fsth1; \
4206 int ch, cl; \
4207 fst0 = fdt0 & 0XFFFFFFFF; \
4208 fsth0 = fdt0 >> 32; \
4209 fst1 = fdt1 & 0XFFFFFFFF; \
4210 fsth1 = fdt1 >> 32; \
4211 cl = condl; \
4212 ch = condh; \
4213 update_fcr31(env, GETPC()); \
4214 if (cl) \
4215 SET_FP_COND(cc, env->active_fpu); \
4216 else \
4217 CLEAR_FP_COND(cc, env->active_fpu); \
4218 if (ch) \
4219 SET_FP_COND(cc + 1, env->active_fpu); \
4220 else \
4221 CLEAR_FP_COND(cc + 1, env->active_fpu); \
4222 } \
4223 void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4224 uint64_t fdt1, int cc) \
4225 { \
4226 uint32_t fst0, fsth0, fst1, fsth1; \
4227 int ch, cl; \
4228 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
4229 fsth0 = float32_abs(fdt0 >> 32); \
4230 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
4231 fsth1 = float32_abs(fdt1 >> 32); \
4232 cl = condl; \
4233 ch = condh; \
4234 update_fcr31(env, GETPC()); \
4235 if (cl) \
4236 SET_FP_COND(cc, env->active_fpu); \
4237 else \
4238 CLEAR_FP_COND(cc, env->active_fpu); \
4239 if (ch) \
4240 SET_FP_COND(cc + 1, env->active_fpu); \
4241 else \
4242 CLEAR_FP_COND(cc + 1, env->active_fpu); \
4243 }
4244
/*
 * Paired-single compare instantiations: each FOP_COND_PS() use below
 * emits helper_cmp_ps_<op> and helper_cmpabs_ps_<op>, which evaluate
 * the first predicate on the low 32-bit halves (fst0/fst1) and the
 * second on the high halves (fsth0/fsth1), writing condition codes
 * cc and cc + 1 respectively.
 */
/* NOTE: the comma operator makes "cond" evaluate to 0 (false), but
 * float32_unordered_quiet() is still called for its side effects. */
FOP_COND_PS(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to 0 (false), but
 * float32_unordered() is still called for its side effects. */
FOP_COND_PS(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
4281
4282 /* R6 compare operations */
#define FOP_CONDN_D(op, cond)                                       \
uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState * env, uint64_t fdt0,  \
                                uint64_t fdt1)                      \
{                                                                   \
    /* Evaluate the predicate before update_fcr31() so the FP flags \
       it may have raised are visible to the FCSR update. */        \
    uint64_t c = cond;                                              \
    update_fcr31(env, GETPC());                                     \
    /* R6 compares produce an all-ones / all-zeros result mask. */  \
    return c ? UINT64_MAX : 0;                                      \
}
4296
/*
 * Instantiate helper_r6_cmp_d_<op> for each R6 condition.  The ops
 * built on the *_quiet predicates are the non-signaling compares;
 * the "s"-prefixed ops use the non-quiet (signaling) variants.
 */
/* NOTE: the comma operator makes "cond" evaluate to 0 (false), but
 * float64_unordered_quiet() is still called for its side effects. */
FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)))
FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
/* NOTE: the comma operator makes "cond" evaluate to 0 (false), but
 * float64_unordered() is still called for its side effects. */
FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)))
FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                 || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                 || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
4337
#define FOP_CONDN_S(op, cond)                                       \
uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState * env, uint32_t fst0,  \
                                uint32_t fst1)                      \
{                                                                   \
    /* Evaluate the predicate before update_fcr31() so the FP flags \
       it may have raised are visible to the FCSR update. */        \
    uint64_t c = cond;                                              \
    update_fcr31(env, GETPC());                                     \
    /* R6 compares produce an all-ones / all-zeros result mask. */  \
    return c ? UINT32_MAX : 0;                                      \
}
4351
/*
 * Instantiate helper_r6_cmp_s_<op> for each R6 condition.  The ops
 * built on the *_quiet predicates are the non-signaling compares;
 * the "s"-prefixed ops use the non-quiet (signaling) variants.
 */
/* NOTE: the comma operator makes "cond" evaluate to 0 (false), but
 * float32_unordered_quiet() is still called for its side effects. */
FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)))
FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
/* NOTE: the comma operator makes "cond" evaluate to 0 (false), but
 * float32_unordered() is still called for its side effects. */
FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)))
FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
                   || float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
                   || float32_le(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status)
                 || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status)
                 || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_le(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
                   || float32_lt(fst1, fst0, &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
                  || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
4392
/* MSA */
/* DF_BITS(df): width in bits of one element of data format df (8 << df). */
#define DF_BITS(df) (1 << ((df) + 3))

/* DF_ELEMENTS(df): number of df-sized elements in one MSA vector register. */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

#if !defined(CONFIG_USER_ONLY)
/* Declare the memop index "oi" consumed by the softmmu slow-path accessors
 * (unaligned, target-endian, element size DF). */
#define MEMOP_IDX(DF) \
        TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
                                        cpu_mmu_index(env, false));
#else
/* User-only accessors take no memop index. */
#define MEMOP_IDX(DF)
#endif
4407
#define MSA_LD_DF(DF, TYPE, LD_INSN, ...)                               \
void helper_msa_ld_ ## TYPE(CPUMIPSState *env, uint32_t wd,             \
                            target_ulong addr)                          \
{                                                                       \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);                          \
    wr_t scratch;                                                       \
    int elem;                                                           \
    MEMOP_IDX(DF)                                                       \
    /* Assemble the vector in a scratch buffer and commit it with a     \
       single memcpy, so wd is only written once every element load     \
       has completed. */                                                \
    for (elem = 0; elem < DF_ELEMENTS(DF); elem++) {                    \
        scratch.TYPE[elem] = LD_INSN(env, addr + (elem << DF), ##__VA_ARGS__); \
    }                                                                   \
    memcpy(pwd, &scratch, sizeof(wr_t));                                \
}
4421
#if !defined(CONFIG_USER_ONLY)
/* Softmmu builds route element loads through the slow-path MMU helpers,
 * passing the memop index "oi" (from MEMOP_IDX) and the return address. */
MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETPC())
MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETPC())
MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETPC())
MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
#else
/* User-only builds use the simple cpu_ld*_data accessors. */
MSA_LD_DF(DF_BYTE, b, cpu_ldub_data)
MSA_LD_DF(DF_HALF, h, cpu_lduw_data)
MSA_LD_DF(DF_WORD, w, cpu_ldl_data)
MSA_LD_DF(DF_DOUBLE, d, cpu_ldq_data)
#endif
4433
/* True if an MSA_WRLEN/8-byte access starting at x crosses a
 * TARGET_PAGE_SIZE boundary, i.e. touches two pages. */
#define MSA_PAGESPAN(x) \
        ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN/8 - 1) >= TARGET_PAGE_SIZE)
4436
4437 static inline void ensure_writable_pages(CPUMIPSState *env,
4438 target_ulong addr,
4439 int mmu_idx,
4440 uintptr_t retaddr)
4441 {
4442 #if !defined(CONFIG_USER_ONLY)
4443 target_ulong page_addr;
4444 if (unlikely(MSA_PAGESPAN(addr))) {
4445 /* first page */
4446 probe_write(env, addr, 0, mmu_idx, retaddr);
4447 /* second page */
4448 page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4449 probe_write(env, page_addr, 0, mmu_idx, retaddr);
4450 }
4451 #endif
4452 }
4453
#define MSA_ST_DF(DF, TYPE, ST_INSN, ...)                               \
void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd,             \
                            target_ulong addr)                          \
{                                                                       \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);                          \
    int mmu_idx = cpu_mmu_index(env, false);                            \
    int elem;                                                           \
    MEMOP_IDX(DF)                                                       \
    /* Probe every touched page first so that no element is stored if   \
       any part of the access would fault. */                           \
    ensure_writable_pages(env, addr, mmu_idx, GETPC());                 \
    for (elem = 0; elem < DF_ELEMENTS(DF); elem++) {                    \
        ST_INSN(env, addr + (elem << DF), pwd->TYPE[elem], ##__VA_ARGS__); \
    }                                                                   \
}
4467
#if !defined(CONFIG_USER_ONLY)
/* Softmmu builds route element stores through the slow-path MMU helpers,
 * passing the memop index "oi" (from MEMOP_IDX) and the return address. */
MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETPC())
MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETPC())
MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETPC())
MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
#else
/* User-only builds use the simple cpu_st*_data accessors. */
MSA_ST_DF(DF_BYTE, b, cpu_stb_data)
MSA_ST_DF(DF_HALF, h, cpu_stw_data)
MSA_ST_DF(DF_WORD, w, cpu_stl_data)
MSA_ST_DF(DF_DOUBLE, d, cpu_stq_data)
#endif
4479
4480 void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
4481 {
4482 #ifndef CONFIG_USER_ONLY
4483 target_ulong index = addr & 0x1fffffff;
4484 if (op == 9) {
4485 /* Index Store Tag */
4486 memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
4487 8, MEMTXATTRS_UNSPECIFIED);
4488 } else if (op == 5) {
4489 /* Index Load Tag */
4490 memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
4491 8, MEMTXATTRS_UNSPECIFIED);
4492 }
4493 #endif
4494 }