git.proxmox.com Git - qemu.git/blob - target-mips/op_helper.c
Switch MIPS clo/clz and the condition tests to TCG.
1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdlib.h>
21 #include "exec.h"
22
23 #include "host-utils.h"
24
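/* On 31-bit s390 hosts the top bit of the saved return address is the
   addressing-mode bit, so mask it off to recover the real host PC. */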
25 #ifdef __s390__
26 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
27 #else
28 # define GETPC() (__builtin_return_address(0))
29 #endif
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
34 void do_raise_exception_err (uint32_t exception, int error_code)
35 {
36 #if 1
37 if (logfile && exception < 0x100)
38 fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
39 #endif
40 env->exception_index = exception;
41 env->error_code = error_code;
42 T0 = 0;
43 cpu_loop_exit();
44 }
45
46 void do_raise_exception (uint32_t exception)
47 {
48 do_raise_exception_err(exception, 0);
49 }
50
51 void do_interrupt_restart (void)
52 {
53 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
54 !(env->CP0_Status & (1 << CP0St_ERL)) &&
55 !(env->hflags & MIPS_HFLAG_DM) &&
56 (env->CP0_Status & (1 << CP0St_IE)) &&
57 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
58 env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
59 do_raise_exception(EXCP_EXT_INTERRUPT);
60 }
61 }
62
63 void do_restore_state (void *pc_ptr)
64 {
65 TranslationBlock *tb;
66 unsigned long pc = (unsigned long) pc_ptr;
67
68 tb = tb_find_pc (pc);
69 if (tb) {
70 cpu_restore_state (tb, env, pc, NULL);
71 }
72 }
73
74 void do_clo (void)
75 {
76 T0 = clo32(T0);
77 }
78
79 void do_clz (void)
80 {
81 T0 = clz32(T0);
82 }
83
84 #if defined(TARGET_MIPS64)
85 #if TARGET_LONG_BITS > HOST_LONG_BITS
86 /* These may call libgcc functions. */
87 void do_dsll (void)
88 {
89 T0 = T0 << T1;
90 }
91
92 void do_dsll32 (void)
93 {
94 T0 = T0 << (T1 + 32);
95 }
96
97 void do_dsra (void)
98 {
99 T0 = (int64_t)T0 >> T1;
100 }
101
102 void do_dsra32 (void)
103 {
104 T0 = (int64_t)T0 >> (T1 + 32);
105 }
106
107 void do_dsrl (void)
108 {
109 T0 = T0 >> T1;
110 }
111
112 void do_dsrl32 (void)
113 {
114 T0 = T0 >> (T1 + 32);
115 }
116
117 void do_drotr (void)
118 {
119 target_ulong tmp;
120
121 if (T1) {
122 tmp = T0 << (0x40 - T1);
123 T0 = (T0 >> T1) | tmp;
124 }
125 }
126
127 void do_drotr32 (void)
128 {
129 target_ulong tmp;
130
131 tmp = T0 << (0x40 - (32 + T1));
132 T0 = (T0 >> (32 + T1)) | tmp;
133 }
134
135 void do_dsllv (void)
136 {
137 T0 = T1 << (T0 & 0x3F);
138 }
139
140 void do_dsrav (void)
141 {
142 T0 = (int64_t)T1 >> (T0 & 0x3F);
143 }
144
145 void do_dsrlv (void)
146 {
147 T0 = T1 >> (T0 & 0x3F);
148 }
149
150 void do_drotrv (void)
151 {
152 target_ulong tmp;
153
154 T0 &= 0x3F;
155 if (T0) {
156 tmp = T1 << (0x40 - T0);
157 T0 = (T1 >> T0) | tmp;
158 } else
159 T0 = T1;
160 }
161
162 void do_dclo (void)
163 {
164 T0 = clo64(T0);
165 }
166
167 void do_dclz (void)
168 {
169 T0 = clz64(T0);
170 }
171
172 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
173 #endif /* TARGET_MIPS64 */
174
175 /* 64-bit arithmetic for 32-bit hosts */
176 #if TARGET_LONG_BITS > HOST_LONG_BITS
177 static always_inline uint64_t get_HILO (void)
178 {
179 return (env->HI[env->current_tc][0] << 32) | (uint32_t)env->LO[env->current_tc][0];
180 }
181
182 static always_inline void set_HILO (uint64_t HILO)
183 {
184 env->LO[env->current_tc][0] = (int32_t)HILO;
185 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
186 }
187
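/* Like set_HILO, but additionally return the HI half (set_HIT0_LO) or the
   LO half (set_HI_LOT0) in T0, as needed by the VR54xx multiply helpers. */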
188 static always_inline void set_HIT0_LO (uint64_t HILO)
189 {
190 env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
191 T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
192 }
193
194 static always_inline void set_HI_LOT0 (uint64_t HILO)
195 {
196 T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
197 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
198 }
199
200 void do_mult (void)
201 {
202 set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
203 }
204
205 void do_multu (void)
206 {
207 set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
208 }
209
210 void do_madd (void)
211 {
212 int64_t tmp;
213
214 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
215 set_HILO((int64_t)get_HILO() + tmp);
216 }
217
218 void do_maddu (void)
219 {
220 uint64_t tmp;
221
222 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
223 set_HILO(get_HILO() + tmp);
224 }
225
226 void do_msub (void)
227 {
228 int64_t tmp;
229
230 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
231 set_HILO((int64_t)get_HILO() - tmp);
232 }
233
234 void do_msubu (void)
235 {
236 uint64_t tmp;
237
238 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
239 set_HILO(get_HILO() - tmp);
240 }
241
242 /* Multiplication variants of the VR54xx. */
243 void do_muls (void)
244 {
245 set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
246 }
247
248 void do_mulsu (void)
249 {
250 set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
251 }
252
253 void do_macc (void)
254 {
255 set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
256 }
257
258 void do_macchi (void)
259 {
260 set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
261 }
262
263 void do_maccu (void)
264 {
265 set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
266 }
267
268 void do_macchiu (void)
269 {
270 set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
271 }
272
273 void do_msac (void)
274 {
275 set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
276 }
277
278 void do_msachi (void)
279 {
280 set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
281 }
282
283 void do_msacu (void)
284 {
285 set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
286 }
287
288 void do_msachiu (void)
289 {
290 set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
291 }
292
293 void do_mulhi (void)
294 {
295 set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
296 }
297
298 void do_mulhiu (void)
299 {
300 set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
301 }
302
303 void do_mulshi (void)
304 {
305 set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
306 }
307
308 void do_mulshiu (void)
309 {
310 set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
311 }
312 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
313
314 #if defined(CONFIG_USER_ONLY)
315 void do_mfc0_random (void)
316 {
317 cpu_abort(env, "mfc0 random\n");
318 }
319
320 void do_mfc0_count (void)
321 {
322 cpu_abort(env, "mfc0 count\n");
323 }
324
325 void cpu_mips_store_count(CPUState *env, uint32_t value)
326 {
327 cpu_abort(env, "mtc0 count\n");
328 }
329
330 void cpu_mips_store_compare(CPUState *env, uint32_t value)
331 {
332 cpu_abort(env, "mtc0 compare\n");
333 }
334
335 void cpu_mips_start_count(CPUState *env)
336 {
337 cpu_abort(env, "start count\n");
338 }
339
340 void cpu_mips_stop_count(CPUState *env)
341 {
342 cpu_abort(env, "stop count\n");
343 }
344
345 void cpu_mips_update_irq(CPUState *env)
346 {
347 cpu_abort(env, "mtc0 status / mtc0 cause\n");
348 }
349
350 void do_mtc0_status_debug(uint32_t old, uint32_t val)
351 {
352 cpu_abort(env, "mtc0 status debug\n");
353 }
354
355 void do_mtc0_status_irqraise_debug (void)
356 {
357 cpu_abort(env, "mtc0 status irqraise debug\n");
358 }
359
360 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
361 {
362 cpu_abort(env, "mips_tlb_flush\n");
363 }
364
365 #else
366
367 /* CP0 helpers */
368 void do_mfc0_random (void)
369 {
370 T0 = (int32_t)cpu_mips_get_random(env);
371 }
372
373 void do_mfc0_count (void)
374 {
375 T0 = (int32_t)cpu_mips_get_count(env);
376 }
377
378 void do_mtc0_status_debug(uint32_t old, uint32_t val)
379 {
380 fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
381 old, old & env->CP0_Cause & CP0Ca_IP_mask,
382 val, val & env->CP0_Cause & CP0Ca_IP_mask,
383 env->CP0_Cause);
384 switch (env->hflags & MIPS_HFLAG_KSU) {
385 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
386 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
387 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
388 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
389 }
390 }
391
392 void do_mtc0_status_irqraise_debug(void)
393 {
394 fprintf(logfile, "Raise pending IRQs\n");
395 }
396
397 void fpu_handle_exception(void)
398 {
399 #ifdef CONFIG_SOFTFLOAT
400 int flags = get_float_exception_flags(&env->fpu->fp_status);
401 unsigned int cpuflags = 0, enable, cause = 0;
402
403 enable = GET_FP_ENABLE(env->fpu->fcr31);
404
405 /* determine current flags */
406 if (flags & float_flag_invalid) {
407 cpuflags |= FP_INVALID;
408 cause |= FP_INVALID & enable;
409 }
410 if (flags & float_flag_divbyzero) {
411 cpuflags |= FP_DIV0;
412 cause |= FP_DIV0 & enable;
413 }
414 if (flags & float_flag_overflow) {
415 cpuflags |= FP_OVERFLOW;
416 cause |= FP_OVERFLOW & enable;
417 }
418 if (flags & float_flag_underflow) {
419 cpuflags |= FP_UNDERFLOW;
420 cause |= FP_UNDERFLOW & enable;
421 }
422 if (flags & float_flag_inexact) {
423 cpuflags |= FP_INEXACT;
424 cause |= FP_INEXACT & enable;
425 }
426 SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
427 SET_FP_CAUSE(env->fpu->fcr31, cause);
428 #else
429 SET_FP_FLAGS(env->fpu->fcr31, 0);
430 SET_FP_CAUSE(env->fpu->fcr31, 0);
431 #endif
432 }
433
434 /* TLB management */
435 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
436 {
437 /* Flush qemu's TLB and discard all shadowed entries. */
438 tlb_flush (env, flush_global);
439 env->tlb->tlb_in_use = env->tlb->nb_tlb;
440 }
441
442 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
443 {
444 /* Discard entries from env->tlb[first] onwards. */
445 while (env->tlb->tlb_in_use > first) {
446 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
447 }
448 }
449
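/* Load TLB entry 'idx' from the CP0 EntryHi/PageMask/EntryLo0/EntryLo1
   registers. */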
450 static void r4k_fill_tlb (int idx)
451 {
452 r4k_tlb_t *tlb;
453
454 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
455 tlb = &env->tlb->mmu.r4k.tlb[idx];
456 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
457 #if defined(TARGET_MIPS64)
458 tlb->VPN &= env->SEGMask;
459 #endif
460 tlb->ASID = env->CP0_EntryHi & 0xFF;
461 tlb->PageMask = env->CP0_PageMask;
462 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
463 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
464 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
465 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
466 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
467 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
468 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
469 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
470 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
471 }
472
473 void r4k_do_tlbwi (void)
474 {
475 /* Discard cached TLB entries. We could avoid doing this if the
476 tlbwi is just upgrading access permissions on the current entry;
477 that might be a further win. */
478 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
479
480 r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
481 r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
482 }
483
484 void r4k_do_tlbwr (void)
485 {
486 int r = cpu_mips_get_random(env);
487
488 r4k_invalidate_tlb(env, r, 1);
489 r4k_fill_tlb(r);
490 }
491
492 void r4k_do_tlbp (void)
493 {
494 r4k_tlb_t *tlb;
495 target_ulong mask;
496 target_ulong tag;
497 target_ulong VPN;
498 uint8_t ASID;
499 int i;
500
501 ASID = env->CP0_EntryHi & 0xFF;
502 for (i = 0; i < env->tlb->nb_tlb; i++) {
503 tlb = &env->tlb->mmu.r4k.tlb[i];
504 /* 1k pages are not supported. */
505 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
506 tag = env->CP0_EntryHi & ~mask;
507 VPN = tlb->VPN & ~mask;
508 /* Check ASID, virtual page number & size */
509 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
510 /* TLB match */
511 env->CP0_Index = i;
512 break;
513 }
514 }
515 if (i == env->tlb->nb_tlb) {
516 /* No match. If any shadow entry matches, discard the shadow entries. */
517 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
518 tlb = &env->tlb->mmu.r4k.tlb[i];
519 /* 1k pages are not supported. */
520 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
521 tag = env->CP0_EntryHi & ~mask;
522 VPN = tlb->VPN & ~mask;
523 /* Check ASID, virtual page number & size */
524 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
525 r4k_mips_tlb_flush_extra (env, i);
526 break;
527 }
528 }
529
530 env->CP0_Index |= 0x80000000;
531 }
532 }
533
534 void r4k_do_tlbr (void)
535 {
536 r4k_tlb_t *tlb;
537 uint8_t ASID;
538
539 ASID = env->CP0_EntryHi & 0xFF;
540 tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
541
542 /* If this will change the current ASID, flush qemu's TLB. */
543 if (ASID != tlb->ASID)
544 cpu_mips_tlb_flush (env, 1);
545
546 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
547
548 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
549 env->CP0_PageMask = tlb->PageMask;
550 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
551 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
552 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
553 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
554 }
555
556 #endif /* !CONFIG_USER_ONLY */
557
558 void dump_ldst (const unsigned char *func)
559 {
560 if (loglevel)
561 fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", func, T0, T1);
562 }
563
564 void dump_sc (void)
565 {
566 if (loglevel) {
567 fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
568 T1, T0, env->CP0_LLAddr);
569 }
570 }
571
572 void debug_pre_eret (void)
573 {
574 fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
575 env->PC[env->current_tc], env->CP0_EPC);
576 if (env->CP0_Status & (1 << CP0St_ERL))
577 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
578 if (env->hflags & MIPS_HFLAG_DM)
579 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
580 fputs("\n", logfile);
581 }
582
583 void debug_post_eret (void)
584 {
585 fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
586 env->PC[env->current_tc], env->CP0_EPC);
587 if (env->CP0_Status & (1 << CP0St_ERL))
588 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
589 if (env->hflags & MIPS_HFLAG_DM)
590 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
591 switch (env->hflags & MIPS_HFLAG_KSU) {
592 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
593 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
594 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
595 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
596 }
597 }
598
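/* Minimal emulation of a few PMON monitor calls: input calls are stubbed
   to return -1, output calls print a character or a string. */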
599 void do_pmon (int function)
600 {
601 function /= 2;
602 switch (function) {
603 case 2: /* TODO: char inbyte(int waitflag); */
604 if (env->gpr[env->current_tc][4] == 0)
605 env->gpr[env->current_tc][2] = -1;
606 /* Fall through */
607 case 11: /* TODO: char inbyte (void); */
608 env->gpr[env->current_tc][2] = -1;
609 break;
610 case 3:
611 case 12:
612 printf("%c", (char)(env->gpr[env->current_tc][4] & 0xFF));
613 break;
614 case 17:
615 break;
616 case 158:
617 {
618 unsigned char *fmt = (void *)(unsigned long)env->gpr[env->current_tc][4];
619 printf("%s", fmt);
620 }
621 break;
622 }
623 }
624
625 #if !defined(CONFIG_USER_ONLY)
626
627 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
628
629 #define MMUSUFFIX _mmu
630 #define ALIGNED_ONLY
631
632 #define SHIFT 0
633 #include "softmmu_template.h"
634
635 #define SHIFT 1
636 #include "softmmu_template.h"
637
638 #define SHIFT 2
639 #include "softmmu_template.h"
640
641 #define SHIFT 3
642 #include "softmmu_template.h"
643
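/* Including softmmu_template.h with SHIFT 0..3 instantiates the MMU
   load/store helpers for 1-, 2-, 4- and 8-byte accesses. */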
644 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
645 {
646 env->CP0_BadVAddr = addr;
647 do_restore_state (retaddr);
648 do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
649 }
650
651 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
652 {
653 TranslationBlock *tb;
654 CPUState *saved_env;
655 unsigned long pc;
656 int ret;
657
658 /* XXX: hack to restore env in all cases, even if not called from
659 generated code */
660 saved_env = env;
661 env = cpu_single_env;
662 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
663 if (ret) {
664 if (retaddr) {
665 /* now we have a real cpu fault */
666 pc = (unsigned long)retaddr;
667 tb = tb_find_pc(pc);
668 if (tb) {
669 /* the PC is inside the translated code. It means that we have
670 a virtual CPU fault */
671 cpu_restore_state(tb, env, pc, NULL);
672 }
673 }
674 do_raise_exception_err(env->exception_index, env->error_code);
675 }
676 env = saved_env;
677 }
678
679 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
680 int unused)
681 {
682 if (is_exec)
683 do_raise_exception(EXCP_IBE);
684 else
685 do_raise_exception(EXCP_DBE);
686 }
687 #endif
688
689 /* Complex FPU operations which may need stack space. */
690
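/* Bit patterns of the constants 1.0 and 2.0 in IEEE single/double format,
   plus the NaN values used to replace the result of an invalid arithmetic
   operation or an out-of-range conversion. */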
691 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
692 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
693 #define FLOAT_TWO32 make_float32(1 << 30)
694 #define FLOAT_TWO64 make_float64(1ULL << 62)
695 #define FLOAT_QNAN32 0x7fbfffff
696 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
697 #define FLOAT_SNAN32 0x7fffffff
698 #define FLOAT_SNAN64 0x7fffffffffffffffULL
699
700 /* Convert the MIPS rounding mode in FCR31 to the softfloat library's rounding mode. */
701 unsigned int ieee_rm[] = {
702 float_round_nearest_even,
703 float_round_to_zero,
704 float_round_up,
705 float_round_down
706 };
707
708 #define RESTORE_ROUNDING_MODE \
709 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
710
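/* cfc1/ctc1 operate on the FPU control registers: 0 is FIR, 25 FCCR,
   26 FEXR, 28 FENR and 31 FCSR (fcr31). */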
711 void do_cfc1 (int reg)
712 {
713 switch (reg) {
714 case 0:
715 T0 = (int32_t)env->fpu->fcr0;
716 break;
717 case 25:
718 T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
719 break;
720 case 26:
721 T0 = env->fpu->fcr31 & 0x0003f07c;
722 break;
723 case 28:
724 T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
725 break;
726 default:
727 T0 = (int32_t)env->fpu->fcr31;
728 break;
729 }
730 }
731
732 void do_ctc1 (int reg)
733 {
734 switch(reg) {
735 case 25:
736 if (T0 & 0xffffff00)
737 return;
738 env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
739 ((T0 & 0x1) << 23);
740 break;
741 case 26:
742 if (T0 & 0x007c0000)
743 return;
744 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
745 break;
746 case 28:
747 if (T0 & 0x007c0000)
748 return;
749 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
750 ((T0 & 0x4) << 22);
751 break;
752 case 31:
753 if (T0 & 0x007c0000)
754 return;
755 env->fpu->fcr31 = T0;
756 break;
757 default:
758 return;
759 }
760 /* set rounding mode */
761 RESTORE_ROUNDING_MODE;
762 set_float_exception_flags(0, &env->fpu->fp_status);
763 if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
764 do_raise_exception(EXCP_FPE);
765 }
766
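/* Translate between the softfloat exception flag bits and the MIPS FCSR
   cause/flags layout (Inexact, Underflow, Overflow, Divide-by-zero, Invalid). */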
767 static always_inline char ieee_ex_to_mips(char xcpt)
768 {
769 return (xcpt & float_flag_inexact) >> 5 |
770 (xcpt & float_flag_underflow) >> 3 |
771 (xcpt & float_flag_overflow) >> 1 |
772 (xcpt & float_flag_divbyzero) << 1 |
773 (xcpt & float_flag_invalid) << 4;
774 }
775
776 static always_inline char mips_ex_to_ieee(char xcpt)
777 {
778 return (xcpt & FP_INEXACT) << 5 |
779 (xcpt & FP_UNDERFLOW) << 3 |
780 (xcpt & FP_OVERFLOW) << 1 |
781 (xcpt & FP_DIV0) >> 1 |
782 (xcpt & FP_INVALID) >> 4;
783 }
784
785 static always_inline void update_fcr31(void)
786 {
787 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));
788
789 SET_FP_CAUSE(env->fpu->fcr31, tmp);
790 if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
791 do_raise_exception(EXCP_FPE);
792 else
793 UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
794 }
795
796 #define FLOAT_OP(name, p) void do_float_##name##_##p(void)
797
798 FLOAT_OP(cvtd, s)
799 {
800 set_float_exception_flags(0, &env->fpu->fp_status);
801 FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
802 update_fcr31();
803 }
804 FLOAT_OP(cvtd, w)
805 {
806 set_float_exception_flags(0, &env->fpu->fp_status);
807 FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
808 update_fcr31();
809 }
810 FLOAT_OP(cvtd, l)
811 {
812 set_float_exception_flags(0, &env->fpu->fp_status);
813 FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
814 update_fcr31();
815 }
816 FLOAT_OP(cvtl, d)
817 {
818 set_float_exception_flags(0, &env->fpu->fp_status);
819 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
820 update_fcr31();
821 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
822 DT2 = FLOAT_SNAN64;
823 }
824 FLOAT_OP(cvtl, s)
825 {
826 set_float_exception_flags(0, &env->fpu->fp_status);
827 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
828 update_fcr31();
829 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
830 DT2 = FLOAT_SNAN64;
831 }
832
833 FLOAT_OP(cvtps, pw)
834 {
835 set_float_exception_flags(0, &env->fpu->fp_status);
836 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
837 FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
838 update_fcr31();
839 }
840 FLOAT_OP(cvtpw, ps)
841 {
842 set_float_exception_flags(0, &env->fpu->fp_status);
843 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
844 WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
845 update_fcr31();
846 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
847 WT2 = FLOAT_SNAN32;
848 }
849 FLOAT_OP(cvts, d)
850 {
851 set_float_exception_flags(0, &env->fpu->fp_status);
852 FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
853 update_fcr31();
854 }
855 FLOAT_OP(cvts, w)
856 {
857 set_float_exception_flags(0, &env->fpu->fp_status);
858 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
859 update_fcr31();
860 }
861 FLOAT_OP(cvts, l)
862 {
863 set_float_exception_flags(0, &env->fpu->fp_status);
864 FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
865 update_fcr31();
866 }
867 FLOAT_OP(cvts, pl)
868 {
869 set_float_exception_flags(0, &env->fpu->fp_status);
870 WT2 = WT0;
871 update_fcr31();
872 }
873 FLOAT_OP(cvts, pu)
874 {
875 set_float_exception_flags(0, &env->fpu->fp_status);
876 WT2 = WTH0;
877 update_fcr31();
878 }
879 FLOAT_OP(cvtw, s)
880 {
881 set_float_exception_flags(0, &env->fpu->fp_status);
882 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
883 update_fcr31();
884 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
885 WT2 = FLOAT_SNAN32;
886 }
887 FLOAT_OP(cvtw, d)
888 {
889 set_float_exception_flags(0, &env->fpu->fp_status);
890 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
891 update_fcr31();
892 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
893 WT2 = FLOAT_SNAN32;
894 }
895
896 FLOAT_OP(roundl, d)
897 {
898 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
899 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
900 RESTORE_ROUNDING_MODE;
901 update_fcr31();
902 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
903 DT2 = FLOAT_SNAN64;
904 }
905 FLOAT_OP(roundl, s)
906 {
907 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
908 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
909 RESTORE_ROUNDING_MODE;
910 update_fcr31();
911 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
912 DT2 = FLOAT_SNAN64;
913 }
914 FLOAT_OP(roundw, d)
915 {
916 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
917 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
918 RESTORE_ROUNDING_MODE;
919 update_fcr31();
920 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
921 WT2 = FLOAT_SNAN32;
922 }
923 FLOAT_OP(roundw, s)
924 {
925 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
926 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
927 RESTORE_ROUNDING_MODE;
928 update_fcr31();
929 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
930 WT2 = FLOAT_SNAN32;
931 }
932
933 FLOAT_OP(truncl, d)
934 {
935 DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
936 update_fcr31();
937 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
938 DT2 = FLOAT_SNAN64;
939 }
940 FLOAT_OP(truncl, s)
941 {
942 DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
943 update_fcr31();
944 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
945 DT2 = FLOAT_SNAN64;
946 }
947 FLOAT_OP(truncw, d)
948 {
949 WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
950 update_fcr31();
951 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
952 WT2 = FLOAT_SNAN32;
953 }
954 FLOAT_OP(truncw, s)
955 {
956 WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
957 update_fcr31();
958 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
959 WT2 = FLOAT_SNAN32;
960 }
961
962 FLOAT_OP(ceill, d)
963 {
964 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
965 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
966 RESTORE_ROUNDING_MODE;
967 update_fcr31();
968 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
969 DT2 = FLOAT_SNAN64;
970 }
971 FLOAT_OP(ceill, s)
972 {
973 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
974 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
975 RESTORE_ROUNDING_MODE;
976 update_fcr31();
977 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
978 DT2 = FLOAT_SNAN64;
979 }
980 FLOAT_OP(ceilw, d)
981 {
982 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
983 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
984 RESTORE_ROUNDING_MODE;
985 update_fcr31();
986 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
987 WT2 = FLOAT_SNAN32;
988 }
989 FLOAT_OP(ceilw, s)
990 {
991 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
992 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
993 RESTORE_ROUNDING_MODE;
994 update_fcr31();
995 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
996 WT2 = FLOAT_SNAN32;
997 }
998
999 FLOAT_OP(floorl, d)
1000 {
1001 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1002 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1003 RESTORE_ROUNDING_MODE;
1004 update_fcr31();
1005 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1006 DT2 = FLOAT_SNAN64;
1007 }
1008 FLOAT_OP(floorl, s)
1009 {
1010 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1011 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1012 RESTORE_ROUNDING_MODE;
1013 update_fcr31();
1014 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1015 DT2 = FLOAT_SNAN64;
1016 }
1017 FLOAT_OP(floorw, d)
1018 {
1019 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1020 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1021 RESTORE_ROUNDING_MODE;
1022 update_fcr31();
1023 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1024 WT2 = FLOAT_SNAN32;
1025 }
1026 FLOAT_OP(floorw, s)
1027 {
1028 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1029 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1030 RESTORE_ROUNDING_MODE;
1031 update_fcr31();
1032 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1033 WT2 = FLOAT_SNAN32;
1034 }
1035
1036 /* MIPS specific unary operations */
1037 FLOAT_OP(recip, d)
1038 {
1039 set_float_exception_flags(0, &env->fpu->fp_status);
1040 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1041 update_fcr31();
1042 }
1043 FLOAT_OP(recip, s)
1044 {
1045 set_float_exception_flags(0, &env->fpu->fp_status);
1046 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1047 update_fcr31();
1048 }
1049
1050 FLOAT_OP(rsqrt, d)
1051 {
1052 set_float_exception_flags(0, &env->fpu->fp_status);
1053 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1054 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1055 update_fcr31();
1056 }
1057 FLOAT_OP(rsqrt, s)
1058 {
1059 set_float_exception_flags(0, &env->fpu->fp_status);
1060 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1061 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1062 update_fcr31();
1063 }
1064
1065 FLOAT_OP(recip1, d)
1066 {
1067 set_float_exception_flags(0, &env->fpu->fp_status);
1068 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1069 update_fcr31();
1070 }
1071 FLOAT_OP(recip1, s)
1072 {
1073 set_float_exception_flags(0, &env->fpu->fp_status);
1074 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1075 update_fcr31();
1076 }
1077 FLOAT_OP(recip1, ps)
1078 {
1079 set_float_exception_flags(0, &env->fpu->fp_status);
1080 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1081 FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
1082 update_fcr31();
1083 }
1084
1085 FLOAT_OP(rsqrt1, d)
1086 {
1087 set_float_exception_flags(0, &env->fpu->fp_status);
1088 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1089 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1090 update_fcr31();
1091 }
1092 FLOAT_OP(rsqrt1, s)
1093 {
1094 set_float_exception_flags(0, &env->fpu->fp_status);
1095 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1096 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1097 update_fcr31();
1098 }
1099 FLOAT_OP(rsqrt1, ps)
1100 {
1101 set_float_exception_flags(0, &env->fpu->fp_status);
1102 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1103 FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
1104 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1105 FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
1106 update_fcr31();
1107 }
1108
1109 /* binary operations */
1110 #define FLOAT_BINOP(name) \
1111 FLOAT_OP(name, d) \
1112 { \
1113 set_float_exception_flags(0, &env->fpu->fp_status); \
1114 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
1115 update_fcr31(); \
1116 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1117 DT2 = FLOAT_QNAN64; \
1118 } \
1119 FLOAT_OP(name, s) \
1120 { \
1121 set_float_exception_flags(0, &env->fpu->fp_status); \
1122 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1123 update_fcr31(); \
1124 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1125 WT2 = FLOAT_QNAN32; \
1126 } \
1127 FLOAT_OP(name, ps) \
1128 { \
1129 set_float_exception_flags(0, &env->fpu->fp_status); \
1130 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1131 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
1132 update_fcr31(); \
1133 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
1134 WT2 = FLOAT_QNAN32; \
1135 WTH2 = FLOAT_QNAN32; \
1136 } \
1137 }
1138 FLOAT_BINOP(add)
1139 FLOAT_BINOP(sub)
1140 FLOAT_BINOP(mul)
1141 FLOAT_BINOP(div)
1142 #undef FLOAT_BINOP
1143
1144 /* MIPS specific binary operations */
1145 FLOAT_OP(recip2, d)
1146 {
1147 set_float_exception_flags(0, &env->fpu->fp_status);
1148 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
1149 FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
1150 update_fcr31();
1151 }
1152 FLOAT_OP(recip2, s)
1153 {
1154 set_float_exception_flags(0, &env->fpu->fp_status);
1155 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1156 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
1157 update_fcr31();
1158 }
1159 FLOAT_OP(recip2, ps)
1160 {
1161 set_float_exception_flags(0, &env->fpu->fp_status);
1162 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1163 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
1164 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
1165 FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
1166 update_fcr31();
1167 }
1168
1169 FLOAT_OP(rsqrt2, d)
1170 {
1171 set_float_exception_flags(0, &env->fpu->fp_status);
1172 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
1173 FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
1174 FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
1175 update_fcr31();
1176 }
1177 FLOAT_OP(rsqrt2, s)
1178 {
1179 set_float_exception_flags(0, &env->fpu->fp_status);
1180 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1181 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
1182 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
1183 update_fcr31();
1184 }
1185 FLOAT_OP(rsqrt2, ps)
1186 {
1187 set_float_exception_flags(0, &env->fpu->fp_status);
1188 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1189 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
1190 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
1191 FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
1192 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
1193 FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
1194 update_fcr31();
1195 }
1196
1197 FLOAT_OP(addr, ps)
1198 {
1199 set_float_exception_flags(0, &env->fpu->fp_status);
1200 FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
1201 FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
1202 update_fcr31();
1203 }
1204
1205 FLOAT_OP(mulr, ps)
1206 {
1207 set_float_exception_flags(0, &env->fpu->fp_status);
1208 FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
1209 FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
1210 update_fcr31();
1211 }
1212
1213 /* compare operations */
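/* Each FOP_COND_* macro expands into the c.<cond>.<fmt> helpers: evaluate the
   predicate, fold the resulting IEEE flags into FCR31 via update_fcr31(), then
   set or clear FP condition code 'cc' ('cc' and 'cc+1' for paired single). */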
1214 #define FOP_COND_D(op, cond) \
1215 void do_cmp_d_ ## op (long cc) \
1216 { \
1217 int c = cond; \
1218 update_fcr31(); \
1219 if (c) \
1220 SET_FP_COND(cc, env->fpu); \
1221 else \
1222 CLEAR_FP_COND(cc, env->fpu); \
1223 } \
1224 void do_cmpabs_d_ ## op (long cc) \
1225 { \
1226 int c; \
1227 FDT0 = float64_abs(FDT0); \
1228 FDT1 = float64_abs(FDT1); \
1229 c = cond; \
1230 update_fcr31(); \
1231 if (c) \
1232 SET_FP_COND(cc, env->fpu); \
1233 else \
1234 CLEAR_FP_COND(cc, env->fpu); \
1235 }
1236
1237 int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
1238 {
1239 if (float64_is_signaling_nan(a) ||
1240 float64_is_signaling_nan(b) ||
1241 (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
1242 float_raise(float_flag_invalid, status);
1243 return 1;
1244 } else if (float64_is_nan(a) || float64_is_nan(b)) {
1245 return 1;
1246 } else {
1247 return 0;
1248 }
1249 }
1250
1251 /* NOTE: the comma operator makes "cond" evaluate to false,
1252 * but float*_is_unordered() is still called. */
1253 FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
1254 FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
1255 FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1256 FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1257 FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1258 FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1259 FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
1260 FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
1261 /* NOTE: the comma operator makes "cond" evaluate to false,
1262 * but float*_is_unordered() is still called. */
1263 FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
1264 FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
1265 FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1266 FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1267 FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1268 FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1269 FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
1270 FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
1271
1272 #define FOP_COND_S(op, cond) \
1273 void do_cmp_s_ ## op (long cc) \
1274 { \
1275 int c = cond; \
1276 update_fcr31(); \
1277 if (c) \
1278 SET_FP_COND(cc, env->fpu); \
1279 else \
1280 CLEAR_FP_COND(cc, env->fpu); \
1281 } \
1282 void do_cmpabs_s_ ## op (long cc) \
1283 { \
1284 int c; \
1285 FST0 = float32_abs(FST0); \
1286 FST1 = float32_abs(FST1); \
1287 c = cond; \
1288 update_fcr31(); \
1289 if (c) \
1290 SET_FP_COND(cc, env->fpu); \
1291 else \
1292 CLEAR_FP_COND(cc, env->fpu); \
1293 }
1294
1295 flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
1296 {
1297 if (float32_is_signaling_nan(a) ||
1298 float32_is_signaling_nan(b) ||
1299 (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
1300 float_raise(float_flag_invalid, status);
1301 return 1;
1302 } else if (float32_is_nan(a) || float32_is_nan(b)) {
1303 return 1;
1304 } else {
1305 return 0;
1306 }
1307 }
1308
1309 /* NOTE: the comma operator makes "cond" evaluate to false,
1310 * but float*_is_unordered() is still called. */
1311 FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
1312 FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
1313 FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
1314 FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
1315 FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
1316 FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
1317 FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
1318 FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
1319 /* NOTE: the comma operator makes "cond" evaluate to false,
1320 * but float*_is_unordered() is still called. */
1321 FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
1322 FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
1323 FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
1324 FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
1325 FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
1326 FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
1327 FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
1328 FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
1329
1330 #define FOP_COND_PS(op, condl, condh) \
1331 void do_cmp_ps_ ## op (long cc) \
1332 { \
1333 int cl = condl; \
1334 int ch = condh; \
1335 update_fcr31(); \
1336 if (cl) \
1337 SET_FP_COND(cc, env->fpu); \
1338 else \
1339 CLEAR_FP_COND(cc, env->fpu); \
1340 if (ch) \
1341 SET_FP_COND(cc + 1, env->fpu); \
1342 else \
1343 CLEAR_FP_COND(cc + 1, env->fpu); \
1344 } \
1345 void do_cmpabs_ps_ ## op (long cc) \
1346 { \
1347 int cl, ch; \
1348 FST0 = float32_abs(FST0); \
1349 FSTH0 = float32_abs(FSTH0); \
1350 FST1 = float32_abs(FST1); \
1351 FSTH1 = float32_abs(FSTH1); \
1352 cl = condl; \
1353 ch = condh; \
1354 update_fcr31(); \
1355 if (cl) \
1356 SET_FP_COND(cc, env->fpu); \
1357 else \
1358 CLEAR_FP_COND(cc, env->fpu); \
1359 if (ch) \
1360 SET_FP_COND(cc + 1, env->fpu); \
1361 else \
1362 CLEAR_FP_COND(cc + 1, env->fpu); \
1363 }
1364
1365 /* NOTE: the comma operator makes "cond" evaluate to false,
1366 * but float*_is_unordered() is still called. */
1367 FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
1368 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
1369 FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
1370 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
1371 FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
1372 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1373 FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
1374 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1375 FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
1376 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1377 FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
1378 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1379 FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
1380 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1381 FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
1382 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1383 /* NOTE: the comma operator makes "cond" evaluate to false,
1384 * but float*_is_unordered() is still called. */
1385 FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
1386 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
1387 FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
1388 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
1389 FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
1390 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1391 FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
1392 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1393 FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
1394 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1395 FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
1396 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1397 FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
1398 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1399 FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
1400 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))