/*
 * MIPS emulation helpers for qemu.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdlib.h>
#include "exec.h"

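/* GETPC() records the host return address of the current helper call;
   do_restore_state() can then map it back to the guest PC via
   tb_find_pc()/cpu_restore_state(). */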
#define GETPC() (__builtin_return_address(0))

/*****************************************************************************/
/* Exceptions processing helpers */

void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    T0 = 0;
    cpu_loop_exit();
}

void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}

void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    cpu_restore_state (tb, env, pc, NULL);
}

void do_raise_exception_direct_err (uint32_t exception, int error_code)
{
    do_restore_state (GETPC ());
    do_raise_exception_err (exception, error_code);
}

void do_raise_exception_direct (uint32_t exception)
{
    do_raise_exception_direct_err (exception, 0);
}

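/* op_helper_mem.c is a template: including it once per MEMSUFFIX generates
   the memory helpers for each access mode (raw, plus user and kernel when
   full system emulation is built). */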
#define MEMSUFFIX _raw
#include "op_helper_mem.c"
#undef MEMSUFFIX
#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _user
#include "op_helper_mem.c"
#undef MEMSUFFIX
#define MEMSUFFIX _kernel
#include "op_helper_mem.c"
#undef MEMSUFFIX
#endif

#ifdef TARGET_MIPS64
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* Those might call libgcc functions. */
void do_dsll (void)
{
    T0 = T0 << T1;
}

void do_dsll32 (void)
{
    T0 = T0 << (T1 + 32);
}

void do_dsra (void)
{
    T0 = (int64_t)T0 >> T1;
}

void do_dsra32 (void)
{
    T0 = (int64_t)T0 >> (T1 + 32);
}

void do_dsrl (void)
{
    T0 = T0 >> T1;
}

void do_dsrl32 (void)
{
    T0 = T0 >> (T1 + 32);
}

void do_drotr (void)
{
    target_ulong tmp;

    if (T1) {
        tmp = T0 << (0x40 - T1);
        T0 = (T0 >> T1) | tmp;
    }
}

void do_drotr32 (void)
{
    target_ulong tmp;

    if (T1) {
        tmp = T0 << (0x40 - (32 + T1));
        T0 = (T0 >> (32 + T1)) | tmp;
    }
}

void do_dsllv (void)
{
    T0 = T1 << (T0 & 0x3F);
}

void do_dsrav (void)
{
    T0 = (int64_t)T1 >> (T0 & 0x3F);
}

void do_dsrlv (void)
{
    T0 = T1 >> (T0 & 0x3F);
}

void do_drotrv (void)
{
    target_ulong tmp;

    T0 &= 0x3F;
    if (T0) {
        tmp = T1 << (0x40 - T0);
        T0 = (T1 >> T0) | tmp;
    } else
        T0 = T1;
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
#endif /* TARGET_MIPS64 */

/* 64-bit arithmetic for 32-bit hosts */
#if TARGET_LONG_BITS > HOST_LONG_BITS
static inline uint64_t get_HILO (void)
{
    return (env->HI[0][env->current_tc] << 32) | (uint32_t)env->LO[0][env->current_tc];
}

static inline void set_HILO (uint64_t HILO)
{
    env->LO[0][env->current_tc] = (int32_t)HILO;
    env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
}

void do_mult (void)
{
    set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

void do_multu (void)
{
    set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

void do_madd (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() + tmp);
}

void do_maddu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() + tmp);
}

void do_msub (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() - tmp);
}

void do_msubu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() - tmp);
}
#endif

#if HOST_LONG_BITS < 64
void do_div (void)
{
    /* 64-bit datatypes because we may see overflow/underflow. */
    if (T1 != 0) {
        env->LO[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 / (int32_t)T1);
        env->HI[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 % (int32_t)T1);
    }
}
#endif

#ifdef TARGET_MIPS64
void do_ddiv (void)
{
    if (T1 != 0) {
        lldiv_t res = lldiv((int64_t)T0, (int64_t)T1);
        env->LO[0][env->current_tc] = res.quot;
        env->HI[0][env->current_tc] = res.rem;
    }
}

#if TARGET_LONG_BITS > HOST_LONG_BITS
void do_ddivu (void)
{
    if (T1 != 0) {
        env->LO[0][env->current_tc] = T0 / T1;
        env->HI[0][env->current_tc] = T0 % T1;
    }
}
#endif
#endif /* TARGET_MIPS64 */

#if defined(CONFIG_USER_ONLY)
void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}

void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}

void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}

void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}

void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}

void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}

void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}

#else

/* CP0 helpers */
void do_mfc0_random (void)
{
    T0 = (int32_t)cpu_mips_get_random(env);
}

void do_mfc0_count (void)
{
    T0 = (int32_t)cpu_mips_get_count(env);
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    (env->hflags & MIPS_HFLAG_UM) ? fputs(", UM\n", logfile)
                                  : fputs("\n", logfile);
}

void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}

void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}

/* TLB management */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
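    /* EntryLo field layout, as decoded below: G in bit 0 (the entry is
       global only when G is set in both EntryLo0 and EntryLo1), V in bit 1,
       D in bit 2, C in bits 3..5, PFN starting at bit 6. */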
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#ifdef TARGET_MIPS64
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}

void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries. We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win. */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}

void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}

void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match. Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB. */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}

#endif /* !CONFIG_USER_ONLY */

void dump_ldst (const unsigned char *func)
{
    if (loglevel)
        fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
}

void dump_sc (void)
{
    if (loglevel) {
        fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
                T1, T0, env->CP0_LLAddr);
    }
}

void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}

void debug_post_eret (void)
{
    fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    if (env->hflags & MIPS_HFLAG_UM)
        fputs(", UM\n", logfile);
    else
        fputs("\n", logfile);
}

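/* Minimal emulation of a few PMON monitor calls. The code is divided by two
   before dispatch: 2/11 read a character (inbyte), 3/12 print the character
   in $a0, and 158 prints the string pointed to by $a0 (gpr[4]); the result,
   if any, is returned in $v0 (gpr[2]). */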
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->gpr[4][env->current_tc] == 0)
            env->gpr[2][env->current_tc] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->gpr[2][env->current_tc] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->gpr[4][env->current_tc] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->gpr[4][env->current_tc];
            printf("%s", fmt);
        }
        break;
    }
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

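/* softmmu_template.h is included once per access size; SHIFT is the log2 of
   the access width in bytes (0 = byte, 1 = halfword, 2 = word,
   3 = doubleword). */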
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

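/* Called by the software MMU when an access misses qemu's TLB: try to fill
   the missing entry, and raise the guest exception if the access faults. */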
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* The PC is inside the translated code, which means we have
                   a virtual CPU fault. */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

#endif

/* Complex FPU operations which may need stack space. */

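/* Raw IEEE-754 bit patterns: the sign bit, and the encodings of 1.0 and 2.0
   in single and double precision (0x3f800000 / 0x3ff0000000000000 and
   0x40000000 / 0x4000000000000000). */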
#define FLOAT_SIGN32 (1 << 31)
#define FLOAT_SIGN64 (1ULL << 63)
#define FLOAT_ONE32 (0x3f8 << 20)
#define FLOAT_ONE64 (0x3ffULL << 52)
#define FLOAT_TWO32 (1 << 30)
#define FLOAT_TWO64 (1ULL << 62)

/* Convert the MIPS rounding mode in FCR31 to the IEEE library's. */
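/* FCR31 RM field: 0 = round to nearest, 1 = toward zero, 2 = toward +inf,
   3 = toward -inf. */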
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)

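/* FPU control register numbers handled below: 0 is FIR, 25 is FCCR,
   26 is FEXR, 28 is FENR and 31 is the full FCSR. */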
void do_cfc1 (int reg)
{
    switch (reg) {
    case 0:
        T0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        T0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        T0 = (int32_t)env->fpu->fcr31;
        break;
    }
}

void do_ctc1 (int reg)
{
    switch(reg) {
    case 25:
        if (T0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
                          ((T0 & 0x1) << 23);
        break;
    case 26:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
        break;
    case 28:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
                          ((T0 & 0x4) << 22);
        break;
    case 31:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = T0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}

inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}

#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

FLOAT_OP(cvtd, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtl, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(cvtl, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}

FLOAT_OP(cvtps, pw)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtpw, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}
FLOAT_OP(cvts, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, pl)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WT0;
    update_fcr31();
}
FLOAT_OP(cvts, pu)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WTH0;
    update_fcr31();
}
FLOAT_OP(cvtw, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}
FLOAT_OP(cvtw, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}

FLOAT_OP(roundl, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(roundl, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(roundw, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}
FLOAT_OP(roundw, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}

FLOAT_OP(truncl, d)
{
    DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(truncl, s)
{
    DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(truncw, d)
{
    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}
FLOAT_OP(truncw, s)
{
    WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}

FLOAT_OP(ceill, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(ceill, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(ceilw, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}
FLOAT_OP(ceilw, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}

FLOAT_OP(floorl, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(floorl, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = 0x7fffffffffffffffULL;
}
FLOAT_OP(floorw, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}
FLOAT_OP(floorw, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = 0x7fffffff;
}

/* MIPS-specific unary operations */
FLOAT_OP(recip, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
    update_fcr31();
}

/* binary operations */
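/* On an invalid operation the result is forced to the default NaN pattern
   (0x7ff7ffffffffffff / 0x7fbfffff); on underflow in round-to-nearest mode
   (RM field == 0) the result is flushed to a signed zero. */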
#define FLOAT_BINOP(name) \
FLOAT_OP(name, d) \
{ \
    set_float_exception_flags(0, &env->fpu->fp_status); \
    FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
        FDT2 = 0x7ff7ffffffffffffULL; \
    else if (GET_FP_CAUSE(env->fpu->fcr31) & FP_UNDERFLOW) { \
        if ((env->fpu->fcr31 & 0x3) == 0) \
            FDT2 &= FLOAT_SIGN64; \
    } \
} \
FLOAT_OP(name, s) \
{ \
    set_float_exception_flags(0, &env->fpu->fp_status); \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
        FST2 = 0x7fbfffff; \
    else if (GET_FP_CAUSE(env->fpu->fcr31) & FP_UNDERFLOW) { \
        if ((env->fpu->fcr31 & 0x3) == 0) \
            FST2 &= FLOAT_SIGN32; \
    } \
} \
FLOAT_OP(name, ps) \
{ \
    set_float_exception_flags(0, &env->fpu->fp_status); \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
    FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
        FST2 = 0x7fbfffff; \
        FSTH2 = 0x7fbfffff; \
    } else if (GET_FP_CAUSE(env->fpu->fcr31) & FP_UNDERFLOW) { \
        if ((env->fpu->fcr31 & 0x3) == 0) { \
            FST2 &= FLOAT_SIGN32; \
            FSTH2 &= FLOAT_SIGN32; \
        } \
    } \
}
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* MIPS-specific binary operations */
FLOAT_OP(recip2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}
FLOAT_OP(recip2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}
FLOAT_OP(recip2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(rsqrt2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
    FDT2 = float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}
FLOAT_OP(rsqrt2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}
FLOAT_OP(rsqrt2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(addr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(mulr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

/* compare operations */
#define FOP_COND_D(op, cond) \
void do_cmp_d_ ## op (long cc) \
{ \
    int c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->fpu); \
    else \
        CLEAR_FP_COND(cc, env->fpu); \
} \
void do_cmpabs_d_ ## op (long cc) \
{ \
    int c; \
    FDT0 &= ~FLOAT_SIGN64; \
    FDT1 &= ~FLOAT_SIGN64; \
    c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->fpu); \
    else \
        CLEAR_FP_COND(cc, env->fpu); \
}

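/* Returns 1 when either operand is a NaN. A signaling NaN (or, when 'sig'
   is set, any NaN) also raises the invalid exception; this is what
   distinguishes the signaling from the quiet c.cond.fmt comparisons below. */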
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))

#define FOP_COND_S(op, cond) \
void do_cmp_s_ ## op (long cc) \
{ \
    int c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->fpu); \
    else \
        CLEAR_FP_COND(cc, env->fpu); \
} \
void do_cmpabs_s_ ## op (long cc) \
{ \
    int c; \
    FST0 &= ~FLOAT_SIGN32; \
    FST1 &= ~FLOAT_SIGN32; \
    c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->fpu); \
    else \
        CLEAR_FP_COND(cc, env->fpu); \
}

flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))

#define FOP_COND_PS(op, condl, condh) \
void do_cmp_ps_ ## op (long cc) \
{ \
    int cl = condl; \
    int ch = condh; \
    update_fcr31(); \
    if (cl) \
        SET_FP_COND(cc, env->fpu); \
    else \
        CLEAR_FP_COND(cc, env->fpu); \
    if (ch) \
        SET_FP_COND(cc + 1, env->fpu); \
    else \
        CLEAR_FP_COND(cc + 1, env->fpu); \
} \
void do_cmpabs_ps_ ## op (long cc) \
{ \
    int cl, ch; \
    FST0 &= ~FLOAT_SIGN32; \
    FSTH0 &= ~FLOAT_SIGN32; \
    FST1 &= ~FLOAT_SIGN32; \
    FSTH1 &= ~FLOAT_SIGN32; \
    cl = condl; \
    ch = condh; \
    update_fcr31(); \
    if (cl) \
        SET_FP_COND(cc, env->fpu); \
    else \
        CLEAR_FP_COND(cc, env->fpu); \
    if (ch) \
        SET_FP_COND(cc + 1, env->fpu); \
    else \
        CLEAR_FP_COND(cc + 1, env->fpu); \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
            (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
            float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
            !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
            float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
            !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
            float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
            !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
            float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
            (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
            float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
            !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
            float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
            !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
            float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
            !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
            float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))