]> git.proxmox.com Git - qemu.git/blob - target-mips/op_helper.c
Remove unused is_softmmu parameter from cpu_handle_mmu_fault
[qemu.git] / target-mips / op_helper.c
1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "dyngen-exec.h"
22
23 #include "host-utils.h"
24
25 #include "helper.h"
26
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
30
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33 #endif
34
/* Recompute the cached translation flags (env->hflags) from the
   architectural CP0/FPU state.  hflags mirror the Status/FCR0 bits the
   translator needs at code-generation time: CPU privilege mode, 64-bit
   address/operation enables, CP0/FPU accessibility, FR mode, COP1X.  */
static inline void compute_hflags(CPUState *env)
{
    /* Clear every derived flag, then rebuild each one below.  */
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_UX);
    /* The KSU mode bits are only honoured outside exception level (EXL),
       error level (ERL) and debug mode; otherwise the CPU is in kernel
       mode and the KSU hflags stay zero.  */
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    /* 64-bit operations are enabled in kernel/supervisor mode, or in
       user mode when Status.PX or Status.UX is set.  */
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
        (env->CP0_Status & (1 << CP0St_PX)) ||
        (env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    /* 64-bit user addressing.  */
    if (env->CP0_Status & (1 << CP0St_UX)) {
        env->hflags |= MIPS_HFLAG_UX;
    }
#endif
    /* CP0 is accessible with Status.CU0 set or whenever not in user mode.  */
    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    /* COP1X (indexed FP load/store, madd etc.) availability depends on
       the ISA generation.  */
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them. */
        if (env->CP0_Status & (1 << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
}
83
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
86
87 void helper_raise_exception_err (uint32_t exception, int error_code)
88 {
89 #if 1
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
92 #endif
93 env->exception_index = exception;
94 env->error_code = error_code;
95 cpu_loop_exit(env);
96 }
97
/* Convenience wrapper: raise an exception with error code 0.  */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
102
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state (void *pc_ptr)
105 {
106 TranslationBlock *tb;
107 unsigned long pc = (unsigned long) pc_ptr;
108
109 tb = tb_find_pc (pc);
110 if (tb) {
111 cpu_restore_state(tb, env, pc);
112 }
113 }
114 #endif
115
/* Generate a do_<name>() load helper.  In user-only mode there is a
   single flat address space, so the *_raw accessor is always used.  In
   system mode the access is dispatched on mem_idx: 0 = kernel,
   1 = supervisor, anything else (normally 2) = user.  */
#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
/* do_lbu / do_lw / do_ld: byte, word and doubleword loads.  */
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
141
/* Generate a do_<name>() store helper; the mem_idx dispatch mirrors
   HELPER_LD above (0 = kernel, 1 = supervisor, default/2 = user).  */
#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
/* do_sb / do_sw / do_sd: byte, word and doubleword stores.  */
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
167
/* CLO: count leading ones in the low 32 bits of arg1.  */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

/* CLZ: count leading zeroes in the low 32 bits of arg1.  */
target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
/* DCLO: count leading ones in the full 64-bit arg1.  */
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

/* DCLZ: count leading zeroes in the full 64-bit arg1.  */
target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */
189
/* 64 bits arithmetic for 32 bits hosts */

/* Pack the low 32 bits of HI[0] and LO[0] into one 64-bit value
   (HI in the upper half, LO in the lower half).  */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Unpack a 64-bit value into HI[0]/LO[0]; each half is stored
   sign-extended via the (int32_t) casts.  */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
201
/* Store HILO into HI[0]/LO[0]; the HI half is nominally also assigned
   to arg1 ("T0").
   NOTE(review): arg1 is passed by value, so the assignment below never
   reaches the caller -- callers that need the HI result must read
   env->active_tc.HI[0] themselves.  */
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

/* Store HILO into HI[0]/LO[0]; the LO half is nominally also assigned
   to arg1 ("T0").
   NOTE(review): as above, the write to by-value arg1 is dead -- callers
   must read env->active_tc.LO[0] to get the result.  */
static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
{
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
213
214 /* Multiplication variants of the vr54xx. */
215 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
216 {
217 set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
218
219 return arg1;
220 }
221
222 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
223 {
224 set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
225
226 return arg1;
227 }
228
229 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
230 {
231 set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
232
233 return arg1;
234 }
235
236 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
237 {
238 set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
239
240 return arg1;
241 }
242
243 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
244 {
245 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
246
247 return arg1;
248 }
249
250 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
251 {
252 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
253
254 return arg1;
255 }
256
257 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
258 {
259 set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
260
261 return arg1;
262 }
263
264 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
265 {
266 set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
267
268 return arg1;
269 }
270
271 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
272 {
273 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
274
275 return arg1;
276 }
277
278 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
279 {
280 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
281
282 return arg1;
283 }
284
285 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
286 {
287 set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
288
289 return arg1;
290 }
291
292 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
293 {
294 set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
295
296 return arg1;
297 }
298
299 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
300 {
301 set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
302
303 return arg1;
304 }
305
306 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
307 {
308 set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
309
310 return arg1;
311 }
312
#ifdef TARGET_MIPS64
/* DMULT: full 128-bit signed product of two 64-bit operands into HI/LO.  */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

/* DMULTU: full 128-bit unsigned product of two 64-bit operands into HI/LO.  */
void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
324
325 #ifndef CONFIG_USER_ONLY
326
327 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
328 {
329 target_phys_addr_t lladdr;
330
331 lladdr = cpu_mips_translate_address(env, address, rw);
332
333 if (lladdr == -1LL) {
334 cpu_loop_exit(env);
335 } else {
336 return lladdr;
337 }
338 }
339
/* Generate the LL/LLD helpers: record the physical address of the
   linked location in env->lladdr and the loaded value in env->llval,
   then return the value.  A subsequent SC/SCD compares against both.  */
#define HELPER_LD_ATOMIC(name, insn)                                      \
target_ulong helper_##name(target_ulong arg, int mem_idx)                 \
{                                                                         \
    env->lladdr = do_translate_address(arg, 0);                           \
    env->llval = do_##insn(arg, mem_idx);                                 \
    return env->llval;                                                    \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC
352
/* Generate the SC/SCD helpers.  A misaligned address raises AdES
   (helper_raise_exception() longjmps and does not return).  The store
   succeeds (returns 1) only if the target still translates to the
   linked physical address and the memory still holds the value loaded
   by LL; otherwise 0 is returned and nothing is stored.  */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(EXCP_AdES);                                    \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
376 #endif
377
/* Byte-lane helpers for the unaligned word accesses (LWL/LWR/SWL/SWR)
   below.  GET_LMASK() normalises the byte offset within a word so the
   helpers can be written once for both endiannesses; GET_OFFSET() steps
   the address in the direction matching the target's byte order.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
385
/* LWL: load the "left" (most-significant) part of a possibly unaligned
   word.  Bytes from the addressed byte up to the word boundary are
   merged into the high end of arg1; the remaining low bytes of arg1
   are preserved.  Result is sign-extended to target_ulong.  */
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    /* The addressed byte always becomes the most-significant byte.  */
    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    /* Each further byte is merged only if it lies within the word.  */
    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}
409
/* LWR: load the "right" (least-significant) part of a possibly
   unaligned word.  Bytes from the addressed byte down to the word
   boundary are merged into the low end of arg1; the remaining high
   bytes of arg1 are preserved.  Result is sign-extended.  */
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    /* The addressed byte always becomes the least-significant byte.  */
    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    /* Each further byte is merged only if it lies within the word.  */
    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}
433
434 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
435 {
436 do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
437
438 if (GET_LMASK(arg2) <= 2)
439 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
440
441 if (GET_LMASK(arg2) <= 1)
442 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
443
444 if (GET_LMASK(arg2) == 0)
445 do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
446 }
447
448 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
449 {
450 do_sb(arg2, (uint8_t)arg1, mem_idx);
451
452 if (GET_LMASK(arg2) >= 1)
453 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
454
455 if (GET_LMASK(arg2) >= 2)
456 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
457
458 if (GET_LMASK(arg2) == 3)
459 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
460 }
461
#if defined(TARGET_MIPS64)
/* "half" load and stores. We must do the memory access inline,
   or fault handling won't work. */

/* Doubleword analogue of GET_LMASK: normalised byte offset within an
   8-byte word, used by LDL/LDR/SDL/SDR below.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
471
/* LDL: 64-bit analogue of LWL -- merge the bytes from the addressed
   byte up to the doubleword boundary into the high end of arg1.  */
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    /* The addressed byte always becomes the most-significant byte.  */
    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    /* Each further byte is merged only if it lies within the doubleword.  */
    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}
516
/* LDR: 64-bit analogue of LWR -- merge the bytes from the addressed
   byte down to the doubleword boundary into the low end of arg1.  */
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    /* The addressed byte always becomes the least-significant byte.  */
    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    /* Each further byte is merged only if it lies within the doubleword.  */
    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}
561
562 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
563 {
564 do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
565
566 if (GET_LMASK64(arg2) <= 6)
567 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
568
569 if (GET_LMASK64(arg2) <= 5)
570 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
571
572 if (GET_LMASK64(arg2) <= 4)
573 do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
574
575 if (GET_LMASK64(arg2) <= 3)
576 do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
577
578 if (GET_LMASK64(arg2) <= 2)
579 do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
580
581 if (GET_LMASK64(arg2) <= 1)
582 do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
583
584 if (GET_LMASK64(arg2) <= 0)
585 do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
586 }
587
/* SDR: 64-bit analogue of SWR -- store the least-significant bytes of
   arg1 from the addressed byte down to the doubleword boundary.  */
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    /* Remaining bytes are stored only while still inside the doubleword.  */
    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
613 #endif /* TARGET_MIPS64 */
614
/* Register numbers covered by the multiple load/store helpers below:
   s0..s7 (r16..r23) and s8/fp (r30).  r31 (ra) is handled separately
   via bit 4 of the reglist operand.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
616
/* LWM: load base_reglist (low 4 bits of reglist) consecutive words into
   the registers listed in multiple_regs, then optionally load r31 (ra)
   if bit 4 of reglist is set.  Loads are sign-extended words.  */
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    /* Select the word-load accessor matching the access privilege.  */
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}
649
/* SWM: store counterpart of helper_lwm -- write base_reglist registers
   from multiple_regs as consecutive words, then optionally r31 (ra).  */
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    /* Select the word-store accessor matching the access privilege.  */
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
    default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
682
#if defined(TARGET_MIPS64)
/* LDM: 64-bit variant of helper_lwm -- load base_reglist doublewords
   into the multiple_regs registers, then optionally r31 (ra).  */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    /* Select the doubleword-load accessor matching the access privilege.  */
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}

/* SDM: 64-bit variant of helper_swm -- store base_reglist doublewords
   from the multiple_regs registers, then optionally r31 (ra).  */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    /* Select the doubleword-store accessor matching the access privilege.  */
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
    default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#endif
750
751 #ifndef CONFIG_USER_ONLY
752 /* CP0 helpers */
/* MFC0 MVPControl: read from the shared multi-VPE state (env->mvp).  */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

/* MFC0 MVPConf0.  */
target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

/* MFC0 MVPConf1.  */
target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

/* MFC0 Random: pseudo-random TLB index, sign-extended to 32 bits.  */
target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}

/* MFC0 TCStatus of the currently running thread context.  */
target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}

/* MFTC0 TCStatus: read from the TC selected by VPEControl.TargTC.
   NOTE(review): the masking below assumes the TargTC field starts at
   bit 0 (CP0VPECo_TargTC == 0) -- the same pattern is used by all
   mftc0/mttc0 helpers in this file.  */
target_ulong helper_mftc0_tcstatus(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCStatus;
    else
        return env->tcs[other_tc].CP0_TCStatus;
}
787
/* MFC0 TCBind of the currently running thread context.  */
target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

/* MFTC0 TCBind of the TC selected by VPEControl.TargTC.  */
target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCBind;
    else
        return env->tcs[other_tc].CP0_TCBind;
}

/* MFC0 TCRestart: the restart PC of the current TC.  */
target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

/* MFTC0 TCRestart of the TC selected by VPEControl.TargTC.  */
target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.PC;
    else
        return env->tcs[other_tc].PC;
}

/* MFC0 TCHalt of the current TC.  */
target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

/* MFTC0 TCHalt of the TC selected by VPEControl.TargTC.  */
target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCHalt;
    else
        return env->tcs[other_tc].CP0_TCHalt;
}

/* MFC0 TCContext of the current TC.  */
target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

/* MFTC0 TCContext of the TC selected by VPEControl.TargTC.  */
target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCContext;
    else
        return env->tcs[other_tc].CP0_TCContext;
}

/* MFC0 TCSchedule of the current TC.  */
target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

/* MFTC0 TCSchedule of the TC selected by VPEControl.TargTC.  */
target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCSchedule;
    else
        return env->tcs[other_tc].CP0_TCSchedule;
}

/* MFC0 TCScheFBack of the current TC.  */
target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

/* MFTC0 TCScheFBack of the TC selected by VPEControl.TargTC.  */
target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCScheFBack;
    else
        return env->tcs[other_tc].CP0_TCScheFBack;
}
877
/* MFC0 Count: current timer count, sign-extended to 32 bits.  */
target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}

/* MFTC0 EntryHi: shared EntryHi with the ASID (low 8 bits) replaced by
   the target TC's TCStatus ASID field.  */
target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
}

/* MFTC0 Status: shared Status with the per-TC fields (CU0..CU3, MX,
   KSU) substituted from the target TC's TCStatus.  */
target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    target_ulong t0;
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    /* Mask out CU0..CU3 (bits 31..28), MX (bit 24) and KSU (bits 4..3),
       then refill them from TCStatus.  */
    t0 = env->CP0_Status & ~0xf1000018;
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);

    return t0;
}
914
/* MFC0 LLAddr: linked-load address, shifted right per the CPU's
   LLAddr_shift, sign-extended to 32 bits.  */
target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

/* MFC0 WatchLo[sel], sign-extended to 32 bits.  */
target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

/* MFC0 WatchHi[sel].  */
target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

/* MFC0 Debug: reflect the live debug-mode hflag in the DM bit.  */
target_ulong helper_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

/* MFTC0 Debug: shared Debug with the per-TC SSt/Halt bits substituted
   from the target TC's Debug_tcstatus.  */
target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
953
#if defined(TARGET_MIPS64)
/* DMFC0 variants: full-width (no 32-bit sign-extension) reads of the
   per-TC registers and LLAddr/WatchLo.  */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
990
991 void helper_mtc0_index (target_ulong arg1)
992 {
993 int num = 1;
994 unsigned int tmp = env->tlb->nb_tlb;
995
996 do {
997 tmp >>= 1;
998 num <<= 1;
999 } while (tmp);
1000 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1001 }
1002
/* MTC0 MVPControl: only CPA/VPC/EVP are writable (and only with master
   VPE privileges); STLB additionally requires VPC already set.  */
void helper_mtc0_mvpcontrol (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

/* MTC0 VPEControl: YSI/GSI/TE and the TargTC field are writable.  */
void helper_mtc0_vpecontrol (target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

/* MTC0 VPEConf0: MVP/VPA (and XTC when VPA is set) are writable, but
   only with master VPE privileges.  */
void helper_mtc0_vpeconf0 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}

/* MTC0 VPEConf1: the NCX/NCP2/NCP1 fields are writable only while the
   shared configuration is unlocked (MVPControl.VPC).  */
void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

/* MTC0 YQMask: always forced to zero (yield qualifiers unsupported).  */
void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* MTC0 VPEOpt: only the low 16 bits are kept.  */
void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
1082
/* MTC0 EntryLo0: keep the PFN/flag bits implemented by this model.  */
void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}

/* MTC0 TCStatus: write through the per-CPU writable-bit mask.  */
void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    // TODO: Sync with CP0_Status.

    env->active_tc.CP0_TCStatus = newval;
}

/* MTTC0 TCStatus: write the TC selected by VPEControl.TargTC.  Note
   that unlike helper_mtc0_tcstatus above, no writable-bit mask is
   applied here.  */
void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = arg1;
    else
        env->tcs[other_tc].CP0_TCStatus = arg1;
}

/* MTC0 TCBind: TBE is writable; CurVPE only while the shared
   configuration is unlocked (MVPControl.VPC).  */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

/* MTTC0 TCBind: same write rules, applied to the TargTC thread context.  */
void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == env->current_tc) {
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        env->active_tc.CP0_TCBind = newval;
    } else {
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        env->tcs[other_tc].CP0_TCBind = newval;
    }
}
1141
/* MTC0 TCRestart: set the restart PC, clear the TDS (debug single-step)
   flag, and invalidate any pending LL/SC link.  */
void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* MTTC0 TCRestart: as above, applied to the TargTC thread context.  */
void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc) {
        env->active_tc.PC = arg1;
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        env->tcs[other_tc].PC = arg1;
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}

/* MTC0 TCHalt: only the halt bit (bit 0) is writable.  */
void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}

/* MTTC0 TCHalt: write the TargTC thread context.  Note that unlike
   helper_mtc0_tchalt above, the value is stored unmasked.  */
void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCHalt = arg1;
    else
        env->tcs[other_tc].CP0_TCHalt = arg1;
}

/* MTC0 TCContext.  */
void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

/* MTTC0 TCContext of the TargTC thread context.  */
void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCContext = arg1;
    else
        env->tcs[other_tc].CP0_TCContext = arg1;
}

/* MTC0 TCSchedule.  */
void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

/* MTTC0 TCSchedule of the TargTC thread context.  */
void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCSchedule = arg1;
    else
        env->tcs[other_tc].CP0_TCSchedule = arg1;
}

/* MTC0 TCScheFBack.  */
void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

/* MTTC0 TCScheFBack of the TargTC thread context.  */
void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCScheFBack = arg1;
    else
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
}
1230
/* Write CP0.EntryLo1, keeping only the PFN/C/D/V/G fields (bits 29..0). */
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

/* Write CP0.Context.  Only PTEBase (bits above 22) is software-writable;
   the BadVPN2 field below it is maintained by the TLB-refill machinery. */
void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

/* Write CP0.PageMask, restricted to the mask bits qemu can honour. */
void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}
1248
/* Write CP0.PageGrain.  arg1 is deliberately ignored: every writable
   field depends on a feature that is not modelled, so the register is
   forced to 0.  */
void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}
1256
/* Write CP0.Wired, clamped into the valid TLB index range by modulo. */
void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}
1261
/* SRSConf0..4 writes: writable bits accumulate with |=.
   NOTE(review): bits can be set but never cleared by software this way;
   presumably intentional for shadow-set configuration — confirm.  */
void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* Write CP0.HWREna: only the four standard RDHWR enable bits are kept. */
void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}
1291
/* Write CP0.Count via the timer subsystem so the virtual clock stays
   in sync with the written value.  */
void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
1296
/* Write CP0.EntryHi (VPN2 + ASID).  Mirrors the new ASID into the
   current TC's TCStatus when the MT ASE is present, and flushes qemu's
   cached translations on any ASID change since they are ASID-tagged.  */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1316
1317 void helper_mttc0_entryhi(target_ulong arg1)
1318 {
1319 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1320 int32_t tcstatus;
1321
1322 env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
1323 if (other_tc == env->current_tc) {
1324 tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1325 env->active_tc.CP0_TCStatus = tcstatus;
1326 } else {
1327 tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1328 env->tcs[other_tc].CP0_TCStatus = tcstatus;
1329 }
1330 }
1331
/* Write CP0.Compare via the timer subsystem so the timer interrupt is
   re-armed (and the pending one acknowledged).  */
void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1336
/* Write CP0.Status through the per-CPU writable-bit mask, then
   recompute the cached hflags (KSU/EXL/ERL etc. feed translation and
   privilege checks).  Optionally logs the transition; the log format
   is relied upon by existing tooling, keep it byte-identical.  */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    compute_hflags(env);
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
1359
1360 void helper_mttc0_status(target_ulong arg1)
1361 {
1362 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1363 int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1364
1365 env->CP0_Status = arg1 & ~0xf1000018;
1366 tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1367 tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1368 tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1369 if (other_tc == env->current_tc)
1370 env->active_tc.CP0_TCStatus = tcstatus;
1371 else
1372 env->tcs[other_tc].CP0_TCStatus = tcstatus;
1373 }
1374
/* Write CP0.IntCtl; only the vector-spacing-related bits are writable. */
void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
}

/* Write CP0.SRSCtl; only the ESS and PSS shadow-set fields are writable. */
void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
1386
/* Write CP0.Cause.  Writable bits: IV/WP plus the two software
   interrupt bits IP1..IP0 (mask 0x00C00300), and DC on R2 cores.
   Toggling DC starts/stops the Count timer, and flipping an IP bit
   raises or lowers the matching software interrupt line.  */
void helper_mtc0_cause (target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    /* The Disable Count (DC) bit only exists from release 2 on. */
    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
1412
/* Write CP0.EBase; only the exception base address field is writable. */
void helper_mtc0_ebase (target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}

/* Write CP0.Config0; only K0 (kseg0 cacheability, bits 2..0) is writable. */
void helper_mtc0_config0 (target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/* Write CP0.Config2.  arg1 is deliberately ignored: all writable fields
   relate to L2/L3 caches, which are not modelled, so only the fixed
   bits are kept.  */
void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1429
1430 void helper_mtc0_lladdr (target_ulong arg1)
1431 {
1432 target_long mask = env->CP0_LLAddr_rw_bitmask;
1433 arg1 = arg1 << env->CP0_LLAddr_shift;
1434 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1435 }
1436
/* Write CP0.WatchLo[sel]: keep the watch virtual address and drop the
   low three I/R/W enable bits.  */
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

/* Write CP0.WatchHi[sel].  The second statement looks like intended
   write-one-to-clear handling of the low status bits, but since the
   0x40FF0FF8 mask has already zeroed bits 2..0 it is a no-op —
   NOTE(review): it probably should operate on the *old* register
   value; confirm against the MIPS PRA WatchHi definition.  */
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
1449
/* Write CP0.XContext: only the PTEBase part above the hardware-managed
   BadVPN2/R fields (width derived from SEGBITS) is writable.  */
void helper_mtc0_xcontext (target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

/* Write CP0.Framemask; stored verbatim, no side effects modelled. */
void helper_mtc0_framemask (target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

/* Write CP0.Debug and keep the cached debug-mode hflag in sync with
   the DM bit.  */
void helper_mtc0_debug (target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}
1469
1470 void helper_mttc0_debug(target_ulong arg1)
1471 {
1472 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1473 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1474
1475 /* XXX: Might be wrong, check with EJTAG spec. */
1476 if (other_tc == env->current_tc)
1477 env->active_tc.CP0_Debug_tcstatus = val;
1478 else
1479 env->tcs[other_tc].CP0_Debug_tcstatus = val;
1480 env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1481 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1482 }
1483
/* Write PerfCnt control register 0; only the low control bits are kept. */
void helper_mtc0_performance0 (target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

/* Cache tag/data registers: stored with minimal masking, the cache
   itself is not modelled.  */
void helper_mtc0_taglo (target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo (target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi (target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi (target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
1508
1509 /* MIPS MT functions */
1510 target_ulong helper_mftgpr(uint32_t sel)
1511 {
1512 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1513
1514 if (other_tc == env->current_tc)
1515 return env->active_tc.gpr[sel];
1516 else
1517 return env->tcs[other_tc].gpr[sel];
1518 }
1519
1520 target_ulong helper_mftlo(uint32_t sel)
1521 {
1522 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1523
1524 if (other_tc == env->current_tc)
1525 return env->active_tc.LO[sel];
1526 else
1527 return env->tcs[other_tc].LO[sel];
1528 }
1529
1530 target_ulong helper_mfthi(uint32_t sel)
1531 {
1532 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1533
1534 if (other_tc == env->current_tc)
1535 return env->active_tc.HI[sel];
1536 else
1537 return env->tcs[other_tc].HI[sel];
1538 }
1539
1540 target_ulong helper_mftacx(uint32_t sel)
1541 {
1542 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1543
1544 if (other_tc == env->current_tc)
1545 return env->active_tc.ACX[sel];
1546 else
1547 return env->tcs[other_tc].ACX[sel];
1548 }
1549
1550 target_ulong helper_mftdsp(void)
1551 {
1552 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1553
1554 if (other_tc == env->current_tc)
1555 return env->active_tc.DSPControl;
1556 else
1557 return env->tcs[other_tc].DSPControl;
1558 }
1559
1560 void helper_mttgpr(target_ulong arg1, uint32_t sel)
1561 {
1562 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1563
1564 if (other_tc == env->current_tc)
1565 env->active_tc.gpr[sel] = arg1;
1566 else
1567 env->tcs[other_tc].gpr[sel] = arg1;
1568 }
1569
1570 void helper_mttlo(target_ulong arg1, uint32_t sel)
1571 {
1572 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1573
1574 if (other_tc == env->current_tc)
1575 env->active_tc.LO[sel] = arg1;
1576 else
1577 env->tcs[other_tc].LO[sel] = arg1;
1578 }
1579
1580 void helper_mtthi(target_ulong arg1, uint32_t sel)
1581 {
1582 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1583
1584 if (other_tc == env->current_tc)
1585 env->active_tc.HI[sel] = arg1;
1586 else
1587 env->tcs[other_tc].HI[sel] = arg1;
1588 }
1589
1590 void helper_mttacx(target_ulong arg1, uint32_t sel)
1591 {
1592 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1593
1594 if (other_tc == env->current_tc)
1595 env->active_tc.ACX[sel] = arg1;
1596 else
1597 env->tcs[other_tc].ACX[sel] = arg1;
1598 }
1599
1600 void helper_mttdsp(target_ulong arg1)
1601 {
1602 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1603
1604 if (other_tc == env->current_tc)
1605 env->active_tc.DSPControl = arg1;
1606 else
1607 env->tcs[other_tc].DSPControl = arg1;
1608 }
1609
1610 /* MIPS MT functions */
/* DMT: disable multi-threading.  Stub — not implemented, returns 0. */
target_ulong helper_dmt(void)
{
    // TODO
    return 0;
}

/* EMT: enable multi-threading.  Stub — not implemented, returns 0. */
target_ulong helper_emt(void)
{
    // TODO
    return 0;
}

/* DVPE: disable virtual processing elements.  Stub, returns 0. */
target_ulong helper_dvpe(void)
{
    // TODO
    return 0;
}

/* EVPE: enable virtual processing elements.  Stub, returns 0. */
target_ulong helper_evpe(void)
{
    // TODO
    return 0;
}
1634 #endif /* !CONFIG_USER_ONLY */
1635
/* FORK: allocate and start a new TC (arg1 = rt, arg2 = rs).  Stub:
   the write-back to the new TC's register is not implemented, so the
   assignment to the local arg1 has no external effect.  */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
1642
/* YIELD: negative arg waits on a qualifier, 0 deallocates the TC,
   positive yields to other threads.  Actual thread scheduling is not
   modelled; the helper only raises the Thread exception where the
   architecture requires one, recording the sub-cause in
   VPEControl.EXCPT first.  Returns the YQMask register value.  */
target_ulong helper_yield(target_ulong arg)
{
    /* Signed view so the -1/-2 special encodings compare correctly. */
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            /* Trap if Yield Scheduler Intercept is enabled and the TC
               is allowed to be halted (TCStatus.DT).  */
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1672
1673 #ifndef CONFIG_USER_ONLY
1674 /* TLB management */
/* Flush qemu's cached translations and drop any shadow TLB entries by
   resetting tlb_in_use to the architected TLB size.  */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate (and forget) all shadow entries at index >= first. */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
1689
/* Load TLB entry @idx from the CP0 EntryHi/PageMask/EntryLo0/EntryLo1
   registers.  */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* The entry is global only if both EntryLo halves have G set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    /* Valid, dirty, cache-coherency and PFN fields for the even page.  */
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    /* ...and for the odd page.  */
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
1712
/* TLBWI: write the TLB entry selected by CP0.Index (P bit masked off,
   index wrapped into range) from the CP0 entry registers.  */
void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries. We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win. */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}

/* TLBWR: write a random (non-wired) TLB entry from the CP0 entry
   registers.  */
void r4k_helper_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
1735
1736 void r4k_helper_tlbp (void)
1737 {
1738 r4k_tlb_t *tlb;
1739 target_ulong mask;
1740 target_ulong tag;
1741 target_ulong VPN;
1742 uint8_t ASID;
1743 int i;
1744
1745 ASID = env->CP0_EntryHi & 0xFF;
1746 for (i = 0; i < env->tlb->nb_tlb; i++) {
1747 tlb = &env->tlb->mmu.r4k.tlb[i];
1748 /* 1k pages are not supported. */
1749 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1750 tag = env->CP0_EntryHi & ~mask;
1751 VPN = tlb->VPN & ~mask;
1752 /* Check ASID, virtual page number & size */
1753 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1754 /* TLB match */
1755 env->CP0_Index = i;
1756 break;
1757 }
1758 }
1759 if (i == env->tlb->nb_tlb) {
1760 /* No match. Discard any shadow entries, if any of them match. */
1761 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1762 tlb = &env->tlb->mmu.r4k.tlb[i];
1763 /* 1k pages are not supported. */
1764 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1765 tag = env->CP0_EntryHi & ~mask;
1766 VPN = tlb->VPN & ~mask;
1767 /* Check ASID, virtual page number & size */
1768 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1769 r4k_mips_tlb_flush_extra (env, i);
1770 break;
1771 }
1772 }
1773
1774 env->CP0_Index |= 0x80000000;
1775 }
1776 }
1777
/* TLBR: read the TLB entry selected by CP0.Index back into the CP0
   entry registers.  If this changes the current ASID, qemu's cached
   translations are flushed first; shadow entries are discarded since
   EntryHi is about to be overwritten.  */
void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB. */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    /* Reassemble EntryHi/PageMask/EntryLo0/EntryLo1 from the entry. */
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
1801
/* TLB instruction dispatchers: indirect through the per-MMU-model
   function table so R4k/fixed-mapping MMUs share one translated op. */
void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}
1821
1822 /* Specials */
1823 target_ulong helper_di (void)
1824 {
1825 target_ulong t0 = env->CP0_Status;
1826
1827 env->CP0_Status = t0 & ~(1 << CP0St_IE);
1828 return t0;
1829 }
1830
1831 target_ulong helper_ei (void)
1832 {
1833 target_ulong t0 = env->CP0_Status;
1834
1835 env->CP0_Status = t0 | (1 << CP0St_IE);
1836 return t0;
1837 }
1838
/* Log CPU state before an ERET/DERET when exec logging is enabled.
   The output format is part of the existing log contract — keep it
   byte-identical.  */
static void debug_pre_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

/* Log CPU state after an ERET/DERET, including the resulting MMU mode. */
static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
1869
/* Jump to @error_pc: bit 0 selects MIPS16 mode and is stripped from
   the architectural PC.  */
static void set_pc (target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
1879
/* ERET: return from exception.  Resume at ErrorEPC/clear ERL when in
   error level, otherwise at EPC/clear EXL; recompute the cached
   hflags, and set lladdr to 1 — an address no aligned LL can produce —
   to break any outstanding LL/SC link.  */
void helper_eret (void)
{
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    env->lladdr = 1;
}
1894
1895 void helper_deret (void)
1896 {
1897 debug_pre_eret();
1898 set_pc(env->CP0_DEPC);
1899
1900 env->hflags &= MIPS_HFLAG_DM;
1901 compute_hflags(env);
1902 debug_post_eret();
1903 env->lladdr = 1;
1904 }
1905 #endif /* !CONFIG_USER_ONLY */
1906
1907 target_ulong helper_rdhwr_cpunum(void)
1908 {
1909 if ((env->hflags & MIPS_HFLAG_CP0) ||
1910 (env->CP0_HWREna & (1 << 0)))
1911 return env->CP0_EBase & 0x3ff;
1912 else
1913 helper_raise_exception(EXCP_RI);
1914
1915 return 0;
1916 }
1917
1918 target_ulong helper_rdhwr_synci_step(void)
1919 {
1920 if ((env->hflags & MIPS_HFLAG_CP0) ||
1921 (env->CP0_HWREna & (1 << 1)))
1922 return env->SYNCI_Step;
1923 else
1924 helper_raise_exception(EXCP_RI);
1925
1926 return 0;
1927 }
1928
1929 target_ulong helper_rdhwr_cc(void)
1930 {
1931 if ((env->hflags & MIPS_HFLAG_CP0) ||
1932 (env->CP0_HWREna & (1 << 2)))
1933 return env->CP0_Count;
1934 else
1935 helper_raise_exception(EXCP_RI);
1936
1937 return 0;
1938 }
1939
1940 target_ulong helper_rdhwr_ccres(void)
1941 {
1942 if ((env->hflags & MIPS_HFLAG_CP0) ||
1943 (env->CP0_HWREna & (1 << 3)))
1944 return env->CCRes;
1945 else
1946 helper_raise_exception(EXCP_RI);
1947
1948 return 0;
1949 }
1950
/* Minimal PMON monitor-call emulation.  The call number arrives
   doubled, hence the initial division.  Results follow the MIPS
   calling convention: a0 = gpr[4] carries the argument, v0 = gpr[2]
   the return value.  */
void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        /* No input source is wired up; always report "no byte". */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the low byte of a0 on the host stdout. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /* Print a guest string; the guest pointer in a0 is used
               directly as a host pointer (user-mode emulation only). */
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
1976
/* WAIT: halt the CPU until the next interrupt, exiting the translated
   code loop via the HLT exception.  */
void helper_wait (void)
{
    env->halted = 1;
    helper_raise_exception(EXCP_HLT);
}
1982
1983 #if !defined(CONFIG_USER_ONLY)
1984
1985 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1986
1987 #define MMUSUFFIX _mmu
1988 #define ALIGNED_ONLY
1989
1990 #define SHIFT 0
1991 #include "softmmu_template.h"
1992
1993 #define SHIFT 1
1994 #include "softmmu_template.h"
1995
1996 #define SHIFT 2
1997 #include "softmmu_template.h"
1998
1999 #define SHIFT 3
2000 #include "softmmu_template.h"
2001
/* Softmmu callback for an unaligned access: record the faulting
   address, restore the CPU state from the return address, and raise
   an address-error exception (AdES for stores, AdEL for loads).  */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
2008
/* Handle a softmmu TLB miss at @addr.  On a real fault the CPU state
   is rolled back to the faulting instruction (when retaddr points into
   translated code) and the pending exception is raised; note the raise
   longjmps away, so the trailing env restore only runs on success.  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
2036
2037 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
2038 int is_write, int is_exec, int unused, int size)
2039 {
2040 env = env1;
2041
2042 if (is_exec)
2043 helper_raise_exception(EXCP_IBE);
2044 else
2045 helper_raise_exception(EXCP_DBE);
2046 }
2047 #endif /* !CONFIG_USER_ONLY */
2048
2049 /* Complex FPU operations which may need stack space. */
2050
2051 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2052 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2053 #define FLOAT_TWO32 make_float32(1 << 30)
2054 #define FLOAT_TWO64 make_float64(1ULL << 62)
2055 #define FLOAT_QNAN32 0x7fbfffff
2056 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2057 #define FLOAT_SNAN32 0x7fffffff
2058 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2059
2060 /* convert MIPS rounding mode in FCR31 to IEEE library */
2061 static unsigned int ieee_rm[] = {
2062 float_round_nearest_even,
2063 float_round_to_zero,
2064 float_round_up,
2065 float_round_down
2066 };
2067
2068 #define RESTORE_ROUNDING_MODE \
2069 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2070
2071 #define RESTORE_FLUSH_MODE \
2072 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2073
2074 target_ulong helper_cfc1 (uint32_t reg)
2075 {
2076 target_ulong arg1;
2077
2078 switch (reg) {
2079 case 0:
2080 arg1 = (int32_t)env->active_fpu.fcr0;
2081 break;
2082 case 25:
2083 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2084 break;
2085 case 26:
2086 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2087 break;
2088 case 28:
2089 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2090 break;
2091 default:
2092 arg1 = (int32_t)env->active_fpu.fcr31;
2093 break;
2094 }
2095
2096 return arg1;
2097 }
2098
/* CTC1: write an FPU control register.  Registers 25/26/28 are the
   FCCR/FEXR/FENR views which scatter their bits into FCSR; 31 writes
   FCSR directly.  Writes with reserved bits set are silently ignored.
   Afterwards the softfloat rounding/flush modes are resynchronised
   and an FP exception is raised if an enabled cause bit is pending
   (the unmasked-Unimplemented bit 0x20 always traps).  */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
2135
2136 static inline int ieee_ex_to_mips(int xcpt)
2137 {
2138 int ret = 0;
2139 if (xcpt) {
2140 if (xcpt & float_flag_invalid) {
2141 ret |= FP_INVALID;
2142 }
2143 if (xcpt & float_flag_overflow) {
2144 ret |= FP_OVERFLOW;
2145 }
2146 if (xcpt & float_flag_underflow) {
2147 ret |= FP_UNDERFLOW;
2148 }
2149 if (xcpt & float_flag_divbyzero) {
2150 ret |= FP_DIV0;
2151 }
2152 if (xcpt & float_flag_inexact) {
2153 ret |= FP_INEXACT;
2154 }
2155 }
2156 return ret;
2157 }
2158
2159 static inline void update_fcr31(void)
2160 {
2161 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2162
2163 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2164 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2165 helper_raise_exception(EXCP_FPE);
2166 else
2167 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2168 }
2169
2170 /* Float support.
2171 Single precition routines have a "s" suffix, double precision a
2172 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2173 paired single lower "pl", paired single upper "pu". */
2174
2175 /* unary operations, modifying fp status */
/* unary operations, modifying fp status */
/* SQRT.D: double-precision square root through softfloat. */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

/* SQRT.S: single-precision square root through softfloat. */
uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}
2185
2186 uint64_t helper_float_cvtd_s(uint32_t fst0)
2187 {
2188 uint64_t fdt2;
2189
2190 set_float_exception_flags(0, &env->active_fpu.fp_status);
2191 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2192 update_fcr31();
2193 return fdt2;
2194 }
2195
2196 uint64_t helper_float_cvtd_w(uint32_t wt0)
2197 {
2198 uint64_t fdt2;
2199
2200 set_float_exception_flags(0, &env->active_fpu.fp_status);
2201 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2202 update_fcr31();
2203 return fdt2;
2204 }
2205
2206 uint64_t helper_float_cvtd_l(uint64_t dt0)
2207 {
2208 uint64_t fdt2;
2209
2210 set_float_exception_flags(0, &env->active_fpu.fp_status);
2211 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2212 update_fcr31();
2213 return fdt2;
2214 }
2215
2216 uint64_t helper_float_cvtl_d(uint64_t fdt0)
2217 {
2218 uint64_t dt2;
2219
2220 set_float_exception_flags(0, &env->active_fpu.fp_status);
2221 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2222 update_fcr31();
2223 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2224 dt2 = FLOAT_SNAN64;
2225 return dt2;
2226 }
2227
2228 uint64_t helper_float_cvtl_s(uint32_t fst0)
2229 {
2230 uint64_t dt2;
2231
2232 set_float_exception_flags(0, &env->active_fpu.fp_status);
2233 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2234 update_fcr31();
2235 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2236 dt2 = FLOAT_SNAN64;
2237 return dt2;
2238 }
2239
2240 uint64_t helper_float_cvtps_pw(uint64_t dt0)
2241 {
2242 uint32_t fst2;
2243 uint32_t fsth2;
2244
2245 set_float_exception_flags(0, &env->active_fpu.fp_status);
2246 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2247 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2248 update_fcr31();
2249 return ((uint64_t)fsth2 << 32) | fst2;
2250 }
2251
2252 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2253 {
2254 uint32_t wt2;
2255 uint32_t wth2;
2256
2257 set_float_exception_flags(0, &env->active_fpu.fp_status);
2258 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2259 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2260 update_fcr31();
2261 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2262 wt2 = FLOAT_SNAN32;
2263 wth2 = FLOAT_SNAN32;
2264 }
2265 return ((uint64_t)wth2 << 32) | wt2;
2266 }
2267
2268 uint32_t helper_float_cvts_d(uint64_t fdt0)
2269 {
2270 uint32_t fst2;
2271
2272 set_float_exception_flags(0, &env->active_fpu.fp_status);
2273 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2274 update_fcr31();
2275 return fst2;
2276 }
2277
2278 uint32_t helper_float_cvts_w(uint32_t wt0)
2279 {
2280 uint32_t fst2;
2281
2282 set_float_exception_flags(0, &env->active_fpu.fp_status);
2283 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2284 update_fcr31();
2285 return fst2;
2286 }
2287
2288 uint32_t helper_float_cvts_l(uint64_t dt0)
2289 {
2290 uint32_t fst2;
2291
2292 set_float_exception_flags(0, &env->active_fpu.fp_status);
2293 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2294 update_fcr31();
2295 return fst2;
2296 }
2297
/* CVT.S.PL: lower half of a paired single is already a single; the
   value passes through, only the FCSR bookkeeping runs.  */
uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

/* CVT.S.PU: upper half of a paired single, same pass-through. */
uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}
2317
2318 uint32_t helper_float_cvtw_s(uint32_t fst0)
2319 {
2320 uint32_t wt2;
2321
2322 set_float_exception_flags(0, &env->active_fpu.fp_status);
2323 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2324 update_fcr31();
2325 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2326 wt2 = FLOAT_SNAN32;
2327 return wt2;
2328 }
2329
2330 uint32_t helper_float_cvtw_d(uint64_t fdt0)
2331 {
2332 uint32_t wt2;
2333
2334 set_float_exception_flags(0, &env->active_fpu.fp_status);
2335 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2336 update_fcr31();
2337 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2338 wt2 = FLOAT_SNAN32;
2339 return wt2;
2340 }
2341
2342 uint64_t helper_float_roundl_d(uint64_t fdt0)
2343 {
2344 uint64_t dt2;
2345
2346 set_float_exception_flags(0, &env->active_fpu.fp_status);
2347 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2348 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2349 RESTORE_ROUNDING_MODE;
2350 update_fcr31();
2351 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2352 dt2 = FLOAT_SNAN64;
2353 return dt2;
2354 }
2355
2356 uint64_t helper_float_roundl_s(uint32_t fst0)
2357 {
2358 uint64_t dt2;
2359
2360 set_float_exception_flags(0, &env->active_fpu.fp_status);
2361 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2362 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2363 RESTORE_ROUNDING_MODE;
2364 update_fcr31();
2365 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2366 dt2 = FLOAT_SNAN64;
2367 return dt2;
2368 }
2369
2370 uint32_t helper_float_roundw_d(uint64_t fdt0)
2371 {
2372 uint32_t wt2;
2373
2374 set_float_exception_flags(0, &env->active_fpu.fp_status);
2375 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2376 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2377 RESTORE_ROUNDING_MODE;
2378 update_fcr31();
2379 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2380 wt2 = FLOAT_SNAN32;
2381 return wt2;
2382 }
2383
2384 uint32_t helper_float_roundw_s(uint32_t fst0)
2385 {
2386 uint32_t wt2;
2387
2388 set_float_exception_flags(0, &env->active_fpu.fp_status);
2389 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2390 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2391 RESTORE_ROUNDING_MODE;
2392 update_fcr31();
2393 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2394 wt2 = FLOAT_SNAN32;
2395 return wt2;
2396 }
2397
2398 uint64_t helper_float_truncl_d(uint64_t fdt0)
2399 {
2400 uint64_t dt2;
2401
2402 set_float_exception_flags(0, &env->active_fpu.fp_status);
2403 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2404 update_fcr31();
2405 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2406 dt2 = FLOAT_SNAN64;
2407 return dt2;
2408 }
2409
2410 uint64_t helper_float_truncl_s(uint32_t fst0)
2411 {
2412 uint64_t dt2;
2413
2414 set_float_exception_flags(0, &env->active_fpu.fp_status);
2415 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2416 update_fcr31();
2417 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2418 dt2 = FLOAT_SNAN64;
2419 return dt2;
2420 }
2421
2422 uint32_t helper_float_truncw_d(uint64_t fdt0)
2423 {
2424 uint32_t wt2;
2425
2426 set_float_exception_flags(0, &env->active_fpu.fp_status);
2427 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2428 update_fcr31();
2429 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2430 wt2 = FLOAT_SNAN32;
2431 return wt2;
2432 }
2433
2434 uint32_t helper_float_truncw_s(uint32_t fst0)
2435 {
2436 uint32_t wt2;
2437
2438 set_float_exception_flags(0, &env->active_fpu.fp_status);
2439 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2440 update_fcr31();
2441 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2442 wt2 = FLOAT_SNAN32;
2443 return wt2;
2444 }
2445
2446 uint64_t helper_float_ceill_d(uint64_t fdt0)
2447 {
2448 uint64_t dt2;
2449
2450 set_float_exception_flags(0, &env->active_fpu.fp_status);
2451 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2452 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2453 RESTORE_ROUNDING_MODE;
2454 update_fcr31();
2455 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2456 dt2 = FLOAT_SNAN64;
2457 return dt2;
2458 }
2459
2460 uint64_t helper_float_ceill_s(uint32_t fst0)
2461 {
2462 uint64_t dt2;
2463
2464 set_float_exception_flags(0, &env->active_fpu.fp_status);
2465 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2466 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2467 RESTORE_ROUNDING_MODE;
2468 update_fcr31();
2469 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2470 dt2 = FLOAT_SNAN64;
2471 return dt2;
2472 }
2473
2474 uint32_t helper_float_ceilw_d(uint64_t fdt0)
2475 {
2476 uint32_t wt2;
2477
2478 set_float_exception_flags(0, &env->active_fpu.fp_status);
2479 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2480 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2481 RESTORE_ROUNDING_MODE;
2482 update_fcr31();
2483 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2484 wt2 = FLOAT_SNAN32;
2485 return wt2;
2486 }
2487
2488 uint32_t helper_float_ceilw_s(uint32_t fst0)
2489 {
2490 uint32_t wt2;
2491
2492 set_float_exception_flags(0, &env->active_fpu.fp_status);
2493 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2494 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2495 RESTORE_ROUNDING_MODE;
2496 update_fcr31();
2497 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2498 wt2 = FLOAT_SNAN32;
2499 return wt2;
2500 }
2501
2502 uint64_t helper_float_floorl_d(uint64_t fdt0)
2503 {
2504 uint64_t dt2;
2505
2506 set_float_exception_flags(0, &env->active_fpu.fp_status);
2507 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2508 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2509 RESTORE_ROUNDING_MODE;
2510 update_fcr31();
2511 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2512 dt2 = FLOAT_SNAN64;
2513 return dt2;
2514 }
2515
2516 uint64_t helper_float_floorl_s(uint32_t fst0)
2517 {
2518 uint64_t dt2;
2519
2520 set_float_exception_flags(0, &env->active_fpu.fp_status);
2521 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2522 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2523 RESTORE_ROUNDING_MODE;
2524 update_fcr31();
2525 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2526 dt2 = FLOAT_SNAN64;
2527 return dt2;
2528 }
2529
2530 uint32_t helper_float_floorw_d(uint64_t fdt0)
2531 {
2532 uint32_t wt2;
2533
2534 set_float_exception_flags(0, &env->active_fpu.fp_status);
2535 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2536 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2537 RESTORE_ROUNDING_MODE;
2538 update_fcr31();
2539 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2540 wt2 = FLOAT_SNAN32;
2541 return wt2;
2542 }
2543
2544 uint32_t helper_float_floorw_s(uint32_t fst0)
2545 {
2546 uint32_t wt2;
2547
2548 set_float_exception_flags(0, &env->active_fpu.fp_status);
2549 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2550 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2551 RESTORE_ROUNDING_MODE;
2552 update_fcr31();
2553 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2554 wt2 = FLOAT_SNAN32;
2555 return wt2;
2556 }
2557
2558 /* unary operations, not modifying fp status */
/* Expands to three sign-manipulation helpers (d/s/ps formats) that only
 * flip or clear bit patterns; they never touch fp_status or FCR31. */
#define FLOAT_UNOP(name) \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
{ \
    return float64_ ## name(fdt0); \
} \
uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
{ \
    return float32_ ## name(fst0); \
} \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
{ \
    uint32_t wt0; \
    uint32_t wth0; \
    \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
    wth0 = float32_ ## name(fdt0 >> 32); \
    return ((uint64_t)wth0 << 32) | wt0; \
}
/* ABS.fmt and NEG.fmt */
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2580
2581 /* MIPS specific unary operations */
2582 uint64_t helper_float_recip_d(uint64_t fdt0)
2583 {
2584 uint64_t fdt2;
2585
2586 set_float_exception_flags(0, &env->active_fpu.fp_status);
2587 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2588 update_fcr31();
2589 return fdt2;
2590 }
2591
2592 uint32_t helper_float_recip_s(uint32_t fst0)
2593 {
2594 uint32_t fst2;
2595
2596 set_float_exception_flags(0, &env->active_fpu.fp_status);
2597 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2598 update_fcr31();
2599 return fst2;
2600 }
2601
2602 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2603 {
2604 uint64_t fdt2;
2605
2606 set_float_exception_flags(0, &env->active_fpu.fp_status);
2607 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2608 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2609 update_fcr31();
2610 return fdt2;
2611 }
2612
2613 uint32_t helper_float_rsqrt_s(uint32_t fst0)
2614 {
2615 uint32_t fst2;
2616
2617 set_float_exception_flags(0, &env->active_fpu.fp_status);
2618 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2619 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2620 update_fcr31();
2621 return fst2;
2622 }
2623
2624 uint64_t helper_float_recip1_d(uint64_t fdt0)
2625 {
2626 uint64_t fdt2;
2627
2628 set_float_exception_flags(0, &env->active_fpu.fp_status);
2629 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2630 update_fcr31();
2631 return fdt2;
2632 }
2633
2634 uint32_t helper_float_recip1_s(uint32_t fst0)
2635 {
2636 uint32_t fst2;
2637
2638 set_float_exception_flags(0, &env->active_fpu.fp_status);
2639 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2640 update_fcr31();
2641 return fst2;
2642 }
2643
2644 uint64_t helper_float_recip1_ps(uint64_t fdt0)
2645 {
2646 uint32_t fst2;
2647 uint32_t fsth2;
2648
2649 set_float_exception_flags(0, &env->active_fpu.fp_status);
2650 fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2651 fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2652 update_fcr31();
2653 return ((uint64_t)fsth2 << 32) | fst2;
2654 }
2655
2656 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2657 {
2658 uint64_t fdt2;
2659
2660 set_float_exception_flags(0, &env->active_fpu.fp_status);
2661 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2662 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2663 update_fcr31();
2664 return fdt2;
2665 }
2666
2667 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2668 {
2669 uint32_t fst2;
2670
2671 set_float_exception_flags(0, &env->active_fpu.fp_status);
2672 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2673 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2674 update_fcr31();
2675 return fst2;
2676 }
2677
2678 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2679 {
2680 uint32_t fst2;
2681 uint32_t fsth2;
2682
2683 set_float_exception_flags(0, &env->active_fpu.fp_status);
2684 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2685 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2686 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2687 fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2688 update_fcr31();
2689 return ((uint64_t)fsth2 << 32) | fst2;
2690 }
2691
2692 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2693
2694 /* binary operations */
/* Expands to the d, s and ps helpers for one arithmetic op. Each helper
 * clears the softfloat flags, performs the op, folds the raised flags into
 * FCR31 via update_fcr31(), and forces a quiet-NaN result pattern when the
 * invalid-operation cause bit ended up set. */
#define FLOAT_BINOP(name) \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
{ \
    uint64_t dt2; \
    \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
        dt2 = FLOAT_QNAN64; \
    return dt2; \
} \
\
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
{ \
    uint32_t wt2; \
    \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
        wt2 = FLOAT_QNAN32; \
    return wt2; \
} \
\
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
{ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
    uint32_t fsth0 = fdt0 >> 32; \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
    uint32_t fsth1 = fdt1 >> 32; \
    uint32_t wt2; \
    uint32_t wth2; \
    \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
        wt2 = FLOAT_QNAN32; \
        wth2 = FLOAT_QNAN32; \
    } \
    return ((uint64_t)wth2 << 32) | wt2; \
}

/* ADD/SUB/MUL/DIV.fmt */
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
2745
2746 /* ternary operations */
/* Fused-style op helpers: result = name2(name1(op0, op1), op2).
 * NOTE(review): unlike FLOAT_BINOP, these neither clear the softfloat flags
 * nor call update_fcr31() — confirm whether that is intentional. */
#define FLOAT_TERNOP(name1, name2) \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                               uint64_t fdt2) \
{ \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
} \
\
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                               uint32_t fst2) \
{ \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
} \
\
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2) \
{ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
    uint32_t fsth0 = fdt0 >> 32; \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
    uint32_t fsth1 = fdt1 >> 32; \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
    uint32_t fsth2 = fdt2 >> 32; \
    \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
    return ((uint64_t)fsth2 << 32) | fst2; \
}

/* MADD.fmt and MSUB.fmt */
FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
2782
2783 /* negated ternary operations */
/* Negated fused-style op helpers: result = -name2(name1(op0, op1), op2).
 * NOTE(review): like FLOAT_TERNOP, these do not clear flags or call
 * update_fcr31() — confirm whether that is intentional. */
#define FLOAT_NTERNOP(name1, name2) \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2) \
{ \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
    return float64_chs(fdt2); \
} \
\
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                                uint32_t fst2) \
{ \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
    return float32_chs(fst2); \
} \
\
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                                 uint64_t fdt2) \
{ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
    uint32_t fsth0 = fdt0 >> 32; \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
    uint32_t fsth1 = fdt1 >> 32; \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
    uint32_t fsth2 = fdt2 >> 32; \
    \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
    fst2 = float32_chs(fst2); \
    fsth2 = float32_chs(fsth2); \
    return ((uint64_t)fsth2 << 32) | fst2; \
}

/* NMADD.fmt and NMSUB.fmt */
FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
2823
2824 /* MIPS specific binary operations */
2825 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2826 {
2827 set_float_exception_flags(0, &env->active_fpu.fp_status);
2828 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2829 fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2830 update_fcr31();
2831 return fdt2;
2832 }
2833
2834 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2835 {
2836 set_float_exception_flags(0, &env->active_fpu.fp_status);
2837 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2838 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2839 update_fcr31();
2840 return fst2;
2841 }
2842
2843 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2844 {
2845 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2846 uint32_t fsth0 = fdt0 >> 32;
2847 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2848 uint32_t fsth2 = fdt2 >> 32;
2849
2850 set_float_exception_flags(0, &env->active_fpu.fp_status);
2851 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2852 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2853 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2854 fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2855 update_fcr31();
2856 return ((uint64_t)fsth2 << 32) | fst2;
2857 }
2858
2859 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2860 {
2861 set_float_exception_flags(0, &env->active_fpu.fp_status);
2862 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2863 fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2864 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2865 update_fcr31();
2866 return fdt2;
2867 }
2868
2869 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2870 {
2871 set_float_exception_flags(0, &env->active_fpu.fp_status);
2872 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2873 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2874 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2875 update_fcr31();
2876 return fst2;
2877 }
2878
2879 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2880 {
2881 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2882 uint32_t fsth0 = fdt0 >> 32;
2883 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2884 uint32_t fsth2 = fdt2 >> 32;
2885
2886 set_float_exception_flags(0, &env->active_fpu.fp_status);
2887 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2888 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2889 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2890 fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2891 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2892 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2893 update_fcr31();
2894 return ((uint64_t)fsth2 << 32) | fst2;
2895 }
2896
2897 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
2898 {
2899 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2900 uint32_t fsth0 = fdt0 >> 32;
2901 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2902 uint32_t fsth1 = fdt1 >> 32;
2903 uint32_t fst2;
2904 uint32_t fsth2;
2905
2906 set_float_exception_flags(0, &env->active_fpu.fp_status);
2907 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
2908 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
2909 update_fcr31();
2910 return ((uint64_t)fsth2 << 32) | fst2;
2911 }
2912
2913 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
2914 {
2915 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2916 uint32_t fsth0 = fdt0 >> 32;
2917 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2918 uint32_t fsth1 = fdt1 >> 32;
2919 uint32_t fst2;
2920 uint32_t fsth2;
2921
2922 set_float_exception_flags(0, &env->active_fpu.fp_status);
2923 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
2924 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
2925 update_fcr31();
2926 return ((uint64_t)fsth2 << 32) | fst2;
2927 }
2928
2929 /* compare operations */
/* C.cond.D helpers: evaluate "cond", fold raised flags into FCR31, then
 * set or clear condition-code bit cc. The cmpabs variant compares the
 * absolute values of both operands (CABS.cond.D). */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2976
/* C.cond.S helpers: single-precision counterpart of FOP_COND_D; sets or
 * clears condition-code bit cc after folding flags into FCR31. */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3023
3024 #define FOP_COND_PS(op, condl, condh) \
3025 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3026 { \
3027 uint32_t fst0, fsth0, fst1, fsth1; \
3028 int ch, cl; \
3029 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3030 fst0 = fdt0 & 0XFFFFFFFF; \
3031 fsth0 = fdt0 >> 32; \
3032 fst1 = fdt1 & 0XFFFFFFFF; \
3033 fsth1 = fdt1 >> 32; \
3034 cl = condl; \
3035 ch = condh; \
3036 update_fcr31(); \
3037 if (cl) \
3038 SET_FP_COND(cc, env->active_fpu); \
3039 else \
3040 CLEAR_FP_COND(cc, env->active_fpu); \
3041 if (ch) \
3042 SET_FP_COND(cc + 1, env->active_fpu); \
3043 else \
3044 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3045 } \
3046 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3047 { \
3048 uint32_t fst0, fsth0, fst1, fsth1; \
3049 int ch, cl; \
3050 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3051 fsth0 = float32_abs(fdt0 >> 32); \
3052 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3053 fsth1 = float32_abs(fdt1 >> 32); \
3054 cl = condl; \
3055 ch = condh; \
3056 update_fcr31(); \
3057 if (cl) \
3058 SET_FP_COND(cc, env->active_fpu); \
3059 else \
3060 CLEAR_FP_COND(cc, env->active_fpu); \
3061 if (ch) \
3062 SET_FP_COND(cc + 1, env->active_fpu); \
3063 else \
3064 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3065 }
3066
3067 /* NOTE: the comma operator will make "cond" to eval to false,
3068 * but float32_unordered_quiet() is still called. */
3069 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3070 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3071 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3072 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3073 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3074 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3075 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3076 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3077 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3078 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3079 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3080 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3081 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3082 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3083 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3084 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3085 /* NOTE: the comma operator will make "cond" to eval to false,
3086 * but float32_unordered() is still called. */
3087 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3088 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3089 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3090 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3091 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3092 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3093 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3094 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3095 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3096 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3097 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3098 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3099 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3100 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3101 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3102 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))