Fix broken absoluteness check for cabs.d.*.
[qemu.git] / target-mips / op_helper.c
1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdlib.h>
21 #include "exec.h"
22
23 #include "host-utils.h"
24
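/* GETPC() returns the host return address of the currently executing
 * helper; it is passed to do_restore_state() below to locate the
 * translation block that invoked the helper. On s390 the top bit of the
 * address is masked off, as it is the 31-bit addressing-mode bit on
 * legacy s390 hosts. */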
25 #ifdef __s390__
26 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
27 #else
28 # define GETPC() (__builtin_return_address(0))
29 #endif
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
34 void do_raise_exception_err (uint32_t exception, int error_code)
35 {
36 #if 1
37 if (logfile && exception < 0x100)
38 fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
39 #endif
40 env->exception_index = exception;
41 env->error_code = error_code;
42 T0 = 0;
43 cpu_loop_exit();
44 }
45
46 void do_raise_exception (uint32_t exception)
47 {
48 do_raise_exception_err(exception, 0);
49 }
50
51 void do_restore_state (void *pc_ptr)
52 {
53 TranslationBlock *tb;
54 unsigned long pc = (unsigned long) pc_ptr;
55
56 tb = tb_find_pc (pc);
57 cpu_restore_state (tb, env, pc, NULL);
58 }
59
60 void do_raise_exception_direct_err (uint32_t exception, int error_code)
61 {
62 do_restore_state (GETPC ());
63 do_raise_exception_err (exception, error_code);
64 }
65
66 void do_raise_exception_direct (uint32_t exception)
67 {
68 do_raise_exception_direct_err (exception, 0);
69 }
70
71 #if defined(TARGET_MIPS64)
72 #if TARGET_LONG_BITS > HOST_LONG_BITS
73 /* Those might call libgcc functions. */
74 void do_dsll (void)
75 {
76 T0 = T0 << T1;
77 }
78
79 void do_dsll32 (void)
80 {
81 T0 = T0 << (T1 + 32);
82 }
83
84 void do_dsra (void)
85 {
86 T0 = (int64_t)T0 >> T1;
87 }
88
89 void do_dsra32 (void)
90 {
91 T0 = (int64_t)T0 >> (T1 + 32);
92 }
93
94 void do_dsrl (void)
95 {
96 T0 = T0 >> T1;
97 }
98
99 void do_dsrl32 (void)
100 {
101 T0 = T0 >> (T1 + 32);
102 }
103
104 void do_drotr (void)
105 {
106 target_ulong tmp;
107
108 if (T1) {
109 tmp = T0 << (0x40 - T1);
110 T0 = (T0 >> T1) | tmp;
111 }
112 }
113
114 void do_drotr32 (void)
115 {
116 target_ulong tmp;
117
118 tmp = T0 << (0x40 - (32 + T1));
119 T0 = (T0 >> (32 + T1)) | tmp;
120 }
121
122 void do_dsllv (void)
123 {
124 T0 = T1 << (T0 & 0x3F);
125 }
126
127 void do_dsrav (void)
128 {
129 T0 = (int64_t)T1 >> (T0 & 0x3F);
130 }
131
132 void do_dsrlv (void)
133 {
134 T0 = T1 >> (T0 & 0x3F);
135 }
136
137 void do_drotrv (void)
138 {
139 target_ulong tmp;
140
141 T0 &= 0x3F;
142 if (T0) {
143 tmp = T1 << (0x40 - T0);
144 T0 = (T1 >> T0) | tmp;
145 } else
146 T0 = T1;
147 }
148
149 void do_dclo (void)
150 {
151 T0 = clo64(T0);
152 }
153
154 void do_dclz (void)
155 {
156 T0 = clz64(T0);
157 }
158
159 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
160 #endif /* TARGET_MIPS64 */
161
162 /* 64-bit arithmetic for 32-bit hosts */
163 #if TARGET_LONG_BITS > HOST_LONG_BITS
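/* get_HILO()/set_HILO() pack the 32-bit HI and LO registers of the
 * current thread context into a single 64-bit value and back.
 * set_HIT0_LO() and set_HI_LOT0() additionally copy HI resp. LO into
 * T0, as expected by the VR54xx multiply variants further below. */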
164 static always_inline uint64_t get_HILO (void)
165 {
166 return (env->HI[0][env->current_tc] << 32) | (uint32_t)env->LO[0][env->current_tc];
167 }
168
169 static always_inline void set_HILO (uint64_t HILO)
170 {
171 env->LO[0][env->current_tc] = (int32_t)HILO;
172 env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
173 }
174
175 static always_inline void set_HIT0_LO (uint64_t HILO)
176 {
177 env->LO[0][env->current_tc] = (int32_t)(HILO & 0xFFFFFFFF);
178 T0 = env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
179 }
180
181 static always_inline void set_HI_LOT0 (uint64_t HILO)
182 {
183 T0 = env->LO[0][env->current_tc] = (int32_t)(HILO & 0xFFFFFFFF);
184 env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
185 }
186
187 void do_mult (void)
188 {
189 set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
190 }
191
192 void do_multu (void)
193 {
194 set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
195 }
196
197 void do_madd (void)
198 {
199 int64_t tmp;
200
201 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
202 set_HILO((int64_t)get_HILO() + tmp);
203 }
204
205 void do_maddu (void)
206 {
207 uint64_t tmp;
208
209 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
210 set_HILO(get_HILO() + tmp);
211 }
212
213 void do_msub (void)
214 {
215 int64_t tmp;
216
217 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
218 set_HILO((int64_t)get_HILO() - tmp);
219 }
220
221 void do_msubu (void)
222 {
223 uint64_t tmp;
224
225 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
226 set_HILO(get_HILO() - tmp);
227 }
228
229 /* Multiplication variants of the vr54xx. */
230 void do_muls (void)
231 {
232 set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
233 }
234
235 void do_mulsu (void)
236 {
237 set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
238 }
239
240 void do_macc (void)
241 {
242 set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
243 }
244
245 void do_macchi (void)
246 {
247 set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
248 }
249
250 void do_maccu (void)
251 {
252 set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
253 }
254
255 void do_macchiu (void)
256 {
257 set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
258 }
259
260 void do_msac (void)
261 {
262 set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
263 }
264
265 void do_msachi (void)
266 {
267 set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
268 }
269
270 void do_msacu (void)
271 {
272 set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
273 }
274
275 void do_msachiu (void)
276 {
277 set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
278 }
279
280 void do_mulhi (void)
281 {
282 set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
283 }
284
285 void do_mulhiu (void)
286 {
287 set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
288 }
289
290 void do_mulshi (void)
291 {
292 set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
293 }
294
295 void do_mulshiu (void)
296 {
297 set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
298 }
299 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
300
301 #if HOST_LONG_BITS < 64
302 void do_div (void)
303 {
304     /* Use 64-bit datatypes because the INT32_MIN / -1 case would overflow. */
305 if (T1 != 0) {
306 env->LO[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 / (int32_t)T1);
307 env->HI[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 % (int32_t)T1);
308 }
309 }
310 #endif
311
312 #if defined(TARGET_MIPS64)
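/* Signed 64-bit divide. INT64_MIN / -1 is special-cased because the
 * quotient does not fit in 64 bits and lldiv() may trap on the host;
 * the helper instead delivers the wrapped quotient (INT64_MIN) and a
 * remainder of 0. */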
313 void do_ddiv (void)
314 {
315 if (T1 != 0) {
316 int64_t arg0 = (int64_t)T0;
317 int64_t arg1 = (int64_t)T1;
318 if (arg0 == ((int64_t)-1 << 63) && arg1 == (int64_t)-1) {
319 env->LO[0][env->current_tc] = arg0;
320 env->HI[0][env->current_tc] = 0;
321 } else {
322 lldiv_t res = lldiv(arg0, arg1);
323 env->LO[0][env->current_tc] = res.quot;
324 env->HI[0][env->current_tc] = res.rem;
325 }
326 }
327 }
328
329 #if TARGET_LONG_BITS > HOST_LONG_BITS
330 void do_ddivu (void)
331 {
332 if (T1 != 0) {
333 env->LO[0][env->current_tc] = T0 / T1;
334 env->HI[0][env->current_tc] = T0 % T1;
335 }
336 }
337 #endif
338 #endif /* TARGET_MIPS64 */
339
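/* When built for user-mode emulation (CONFIG_USER_ONLY) there is no CP0
 * timer, interrupt or TLB state to manage, so the stubs below simply
 * abort if they are ever reached. */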
340 #if defined(CONFIG_USER_ONLY)
341 void do_mfc0_random (void)
342 {
343 cpu_abort(env, "mfc0 random\n");
344 }
345
346 void do_mfc0_count (void)
347 {
348 cpu_abort(env, "mfc0 count\n");
349 }
350
351 void cpu_mips_store_count(CPUState *env, uint32_t value)
352 {
353 cpu_abort(env, "mtc0 count\n");
354 }
355
356 void cpu_mips_store_compare(CPUState *env, uint32_t value)
357 {
358 cpu_abort(env, "mtc0 compare\n");
359 }
360
361 void cpu_mips_start_count(CPUState *env)
362 {
363 cpu_abort(env, "start count\n");
364 }
365
366 void cpu_mips_stop_count(CPUState *env)
367 {
368 cpu_abort(env, "stop count\n");
369 }
370
371 void cpu_mips_update_irq(CPUState *env)
372 {
373 cpu_abort(env, "mtc0 status / mtc0 cause\n");
374 }
375
376 void do_mtc0_status_debug(uint32_t old, uint32_t val)
377 {
378 cpu_abort(env, "mtc0 status debug\n");
379 }
380
381 void do_mtc0_status_irqraise_debug (void)
382 {
383 cpu_abort(env, "mtc0 status irqraise debug\n");
384 }
385
386 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
387 {
388 cpu_abort(env, "mips_tlb_flush\n");
389 }
390
391 #else
392
393 /* CP0 helpers */
394 void do_mfc0_random (void)
395 {
396 T0 = (int32_t)cpu_mips_get_random(env);
397 }
398
399 void do_mfc0_count (void)
400 {
401 T0 = (int32_t)cpu_mips_get_count(env);
402 }
403
404 void do_mtc0_status_debug(uint32_t old, uint32_t val)
405 {
406 fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
407 old, old & env->CP0_Cause & CP0Ca_IP_mask,
408 val, val & env->CP0_Cause & CP0Ca_IP_mask,
409 env->CP0_Cause);
410 switch (env->hflags & MIPS_HFLAG_KSU) {
411 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
412 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
413 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
414 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
415 }
416 }
417
418 void do_mtc0_status_irqraise_debug(void)
419 {
420 fprintf(logfile, "Raise pending IRQs\n");
421 }
422
423 void fpu_handle_exception(void)
424 {
425 #ifdef CONFIG_SOFTFLOAT
426 int flags = get_float_exception_flags(&env->fpu->fp_status);
427 unsigned int cpuflags = 0, enable, cause = 0;
428
429 enable = GET_FP_ENABLE(env->fpu->fcr31);
430
431 /* determine current flags */
432 if (flags & float_flag_invalid) {
433 cpuflags |= FP_INVALID;
434 cause |= FP_INVALID & enable;
435 }
436 if (flags & float_flag_divbyzero) {
437 cpuflags |= FP_DIV0;
438 cause |= FP_DIV0 & enable;
439 }
440 if (flags & float_flag_overflow) {
441 cpuflags |= FP_OVERFLOW;
442 cause |= FP_OVERFLOW & enable;
443 }
444 if (flags & float_flag_underflow) {
445 cpuflags |= FP_UNDERFLOW;
446 cause |= FP_UNDERFLOW & enable;
447 }
448 if (flags & float_flag_inexact) {
449 cpuflags |= FP_INEXACT;
450 cause |= FP_INEXACT & enable;
451 }
452 SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
453 SET_FP_CAUSE(env->fpu->fcr31, cause);
454 #else
455 SET_FP_FLAGS(env->fpu->fcr31, 0);
456 SET_FP_CAUSE(env->fpu->fcr31, 0);
457 #endif
458 }
459
460 /* TLB management */
461 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
462 {
463 /* Flush qemu's TLB and discard all shadowed entries. */
464 tlb_flush (env, flush_global);
465 env->tlb->tlb_in_use = env->tlb->nb_tlb;
466 }
467
468 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
469 {
470 /* Discard entries from env->tlb[first] onwards. */
471 while (env->tlb->tlb_in_use > first) {
472 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
473 }
474 }
475
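/* Fill TLB entry "idx" from the CP0 EntryHi/PageMask/EntryLo registers.
 * EntryLo layout: bit 0 G (global, ANDed across both halves), bit 1 V
 * (valid), bit 2 D (dirty), bits 3..5 C (cache attribute), PFN from
 * bit 6 upwards; the << 12 converts the frame number to a byte address. */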
476 static void r4k_fill_tlb (int idx)
477 {
478 r4k_tlb_t *tlb;
479
480 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
481 tlb = &env->tlb->mmu.r4k.tlb[idx];
482 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
483 #if defined(TARGET_MIPS64)
484 tlb->VPN &= env->SEGMask;
485 #endif
486 tlb->ASID = env->CP0_EntryHi & 0xFF;
487 tlb->PageMask = env->CP0_PageMask;
488 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
489 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
490 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
491 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
492 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
493 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
494 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
495 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
496 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
497 }
498
499 void r4k_do_tlbwi (void)
500 {
501 /* Discard cached TLB entries. We could avoid doing this if the
502 tlbwi is just upgrading access permissions on the current entry;
503 that might be a further win. */
504 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
505
506 r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
507 r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
508 }
509
510 void r4k_do_tlbwr (void)
511 {
512 int r = cpu_mips_get_random(env);
513
514 r4k_invalidate_tlb(env, r, 1);
515 r4k_fill_tlb(r);
516 }
517
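/* TLBP: probe the TLB for an entry matching CP0_EntryHi. On a hit the
 * entry index is written to CP0_Index; on a miss the P bit (bit 31) is
 * set instead, after discarding any matching shadow entries. */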
518 void r4k_do_tlbp (void)
519 {
520 r4k_tlb_t *tlb;
521 target_ulong mask;
522 target_ulong tag;
523 target_ulong VPN;
524 uint8_t ASID;
525 int i;
526
527 ASID = env->CP0_EntryHi & 0xFF;
528 for (i = 0; i < env->tlb->nb_tlb; i++) {
529 tlb = &env->tlb->mmu.r4k.tlb[i];
530 /* 1k pages are not supported. */
531 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
532 tag = env->CP0_EntryHi & ~mask;
533 VPN = tlb->VPN & ~mask;
534 /* Check ASID, virtual page number & size */
535 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
536 /* TLB match */
537 env->CP0_Index = i;
538 break;
539 }
540 }
541 if (i == env->tlb->nb_tlb) {
542         /* No match. If any shadow entry matches, discard it and all later shadow entries. */
543 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
544 tlb = &env->tlb->mmu.r4k.tlb[i];
545 /* 1k pages are not supported. */
546 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
547 tag = env->CP0_EntryHi & ~mask;
548 VPN = tlb->VPN & ~mask;
549 /* Check ASID, virtual page number & size */
550 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
551 r4k_mips_tlb_flush_extra (env, i);
552 break;
553 }
554 }
555
556 env->CP0_Index |= 0x80000000;
557 }
558 }
559
560 void r4k_do_tlbr (void)
561 {
562 r4k_tlb_t *tlb;
563 uint8_t ASID;
564
565 ASID = env->CP0_EntryHi & 0xFF;
566 tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
567
568 /* If this will change the current ASID, flush qemu's TLB. */
569 if (ASID != tlb->ASID)
570 cpu_mips_tlb_flush (env, 1);
571
572 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
573
574 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
575 env->CP0_PageMask = tlb->PageMask;
576 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
577 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
578 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
579 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
580 }
581
582 #endif /* !CONFIG_USER_ONLY */
583
584 void dump_ldst (const unsigned char *func)
585 {
586 if (loglevel)
587 fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
588 }
589
590 void dump_sc (void)
591 {
592 if (loglevel) {
593 fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
594 T1, T0, env->CP0_LLAddr);
595 }
596 }
597
598 void debug_pre_eret (void)
599 {
600 fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
601 env->PC[env->current_tc], env->CP0_EPC);
602 if (env->CP0_Status & (1 << CP0St_ERL))
603 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
604 if (env->hflags & MIPS_HFLAG_DM)
605 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
606 fputs("\n", logfile);
607 }
608
609 void debug_post_eret (void)
610 {
611 fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
612 env->PC[env->current_tc], env->CP0_EPC);
613 if (env->CP0_Status & (1 << CP0St_ERL))
614 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
615 if (env->hflags & MIPS_HFLAG_DM)
616 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
617 switch (env->hflags & MIPS_HFLAG_KSU) {
618 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
619 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
620 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
621 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
622 }
623 }
624
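/* Minimal emulation of a few PMON boot-monitor calls: character and
 * string output are implemented, the input routines simply return -1
 * in v0 (gpr[2]). */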
625 void do_pmon (int function)
626 {
627 function /= 2;
628 switch (function) {
629 case 2: /* TODO: char inbyte(int waitflag); */
630 if (env->gpr[4][env->current_tc] == 0)
631 env->gpr[2][env->current_tc] = -1;
632 /* Fall through */
633 case 11: /* TODO: char inbyte (void); */
634 env->gpr[2][env->current_tc] = -1;
635 break;
636 case 3:
637 case 12:
638 printf("%c", (char)(env->gpr[4][env->current_tc] & 0xFF));
639 break;
640 case 17:
641 break;
642 case 158:
643 {
644 unsigned char *fmt = (void *)(unsigned long)env->gpr[4][env->current_tc];
645 printf("%s", fmt);
646 }
647 break;
648 }
649 }
650
651 #if !defined(CONFIG_USER_ONLY)
652
653 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
654
655 #define MMUSUFFIX _mmu
656 #define ALIGNED_ONLY
657
658 #define SHIFT 0
659 #include "softmmu_template.h"
660
661 #define SHIFT 1
662 #include "softmmu_template.h"
663
664 #define SHIFT 2
665 #include "softmmu_template.h"
666
667 #define SHIFT 3
668 #include "softmmu_template.h"
669
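/* Called from the slow path of the softmmu templates above (compiled
 * with ALIGNED_ONLY) when a misaligned access is detected: record the
 * faulting address and raise an address error exception, AdES for
 * stores and AdEL for loads. */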
670 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
671 {
672 env->CP0_BadVAddr = addr;
673 do_restore_state (retaddr);
674 do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
675 }
676
677 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
678 {
679 TranslationBlock *tb;
680 CPUState *saved_env;
681 unsigned long pc;
682 int ret;
683
684 /* XXX: hack to restore env in all cases, even if not called from
685 generated code */
686 saved_env = env;
687 env = cpu_single_env;
688 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
689 if (ret) {
690 if (retaddr) {
691 /* now we have a real cpu fault */
692 pc = (unsigned long)retaddr;
693 tb = tb_find_pc(pc);
694 if (tb) {
695 /* the PC is inside the translated code. It means that we have
696 a virtual CPU fault */
697 cpu_restore_state(tb, env, pc, NULL);
698 }
699 }
700 do_raise_exception_err(env->exception_index, env->error_code);
701 }
702 env = saved_env;
703 }
704
705 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
706 int unused)
707 {
708 if (is_exec)
709 do_raise_exception(EXCP_IBE);
710 else
711 do_raise_exception(EXCP_DBE);
712 }
713 #endif
714
715 /* Complex FPU operations which may need stack space. */
716
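/* Bit patterns for the constants 1.0 and 2.0 in single and double
 * precision, plus the default NaN encodings used below for invalid
 * results. Note that MIPS uses the legacy NaN convention: a set
 * most-significant mantissa bit denotes a signalling NaN, a clear one
 * (with a non-zero remainder) a quiet NaN. */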
717 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
718 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
719 #define FLOAT_TWO32 make_float32(1 << 30)
720 #define FLOAT_TWO64 make_float64(1ULL << 62)
721 #define FLOAT_QNAN32 0x7fbfffff
722 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
723 #define FLOAT_SNAN32 0x7fffffff
724 #define FLOAT_SNAN64 0x7fffffffffffffffULL
725
726 /* Map the MIPS rounding mode field of FCR31 to the softfloat (IEEE) rounding modes. */
727 unsigned int ieee_rm[] = {
728 float_round_nearest_even,
729 float_round_to_zero,
730 float_round_up,
731 float_round_down
732 };
733
734 #define RESTORE_ROUNDING_MODE \
735 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
736
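/* Moves to/from the CP1 control registers: 0 is FIR (read-only
 * implementation register), 25 is FCCR (condition codes), 26 is FEXR
 * (cause/flag bits), 28 is FENR (enables and rounding mode) and 31 is
 * the full FCSR; other register numbers fall back to FCSR on reads and
 * are ignored on writes. */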
737 void do_cfc1 (int reg)
738 {
739 switch (reg) {
740 case 0:
741 T0 = (int32_t)env->fpu->fcr0;
742 break;
743 case 25:
744 T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
745 break;
746 case 26:
747 T0 = env->fpu->fcr31 & 0x0003f07c;
748 break;
749 case 28:
750 T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
751 break;
752 default:
753 T0 = (int32_t)env->fpu->fcr31;
754 break;
755 }
756 }
757
758 void do_ctc1 (int reg)
759 {
760 switch(reg) {
761 case 25:
762 if (T0 & 0xffffff00)
763 return;
764 env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
765 ((T0 & 0x1) << 23);
766 break;
767 case 26:
768 if (T0 & 0x007c0000)
769 return;
770 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
771 break;
772 case 28:
773 if (T0 & 0x007c0000)
774 return;
775 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
776 ((T0 & 0x4) << 22);
777 break;
778 case 31:
779 if (T0 & 0x007c0000)
780 return;
781 env->fpu->fcr31 = T0;
782 break;
783 default:
784 return;
785 }
786 /* set rounding mode */
787 RESTORE_ROUNDING_MODE;
788 set_float_exception_flags(0, &env->fpu->fp_status);
789 if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
790 do_raise_exception(EXCP_FPE);
791 }
792
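/* The two translations below just reorder individual bits between the
 * softfloat exception flag encoding and the MIPS FCSR field order
 * (from the LSB up: Inexact, Underflow, Overflow, Divide-by-zero,
 * Invalid). */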
793 static always_inline char ieee_ex_to_mips(char xcpt)
794 {
795 return (xcpt & float_flag_inexact) >> 5 |
796 (xcpt & float_flag_underflow) >> 3 |
797 (xcpt & float_flag_overflow) >> 1 |
798 (xcpt & float_flag_divbyzero) << 1 |
799 (xcpt & float_flag_invalid) << 4;
800 }
801
802 static always_inline char mips_ex_to_ieee(char xcpt)
803 {
804 return (xcpt & FP_INEXACT) << 5 |
805 (xcpt & FP_UNDERFLOW) << 3 |
806 (xcpt & FP_OVERFLOW) << 1 |
807 (xcpt & FP_DIV0) >> 1 |
808 (xcpt & FP_INVALID) >> 4;
809 }
810
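/* Fold the softfloat exception flags accumulated by the last operation
 * into FCR31: set the Cause field, then either raise a floating-point
 * exception (if the corresponding Enable bit is set) or accumulate the
 * bits into the sticky Flags field. */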
811 static always_inline void update_fcr31(void)
812 {
813 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));
814
815 SET_FP_CAUSE(env->fpu->fcr31, tmp);
816 if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
817 do_raise_exception(EXCP_FPE);
818 else
819 UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
820 }
821
822 #define FLOAT_OP(name, p) void do_float_##name##_##p(void)
823
824 FLOAT_OP(cvtd, s)
825 {
826 set_float_exception_flags(0, &env->fpu->fp_status);
827 FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
828 update_fcr31();
829 }
830 FLOAT_OP(cvtd, w)
831 {
832 set_float_exception_flags(0, &env->fpu->fp_status);
833 FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
834 update_fcr31();
835 }
836 FLOAT_OP(cvtd, l)
837 {
838 set_float_exception_flags(0, &env->fpu->fp_status);
839 FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
840 update_fcr31();
841 }
842 FLOAT_OP(cvtl, d)
843 {
844 set_float_exception_flags(0, &env->fpu->fp_status);
845 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
846 update_fcr31();
847 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
848 DT2 = FLOAT_SNAN64;
849 }
850 FLOAT_OP(cvtl, s)
851 {
852 set_float_exception_flags(0, &env->fpu->fp_status);
853 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
854 update_fcr31();
855 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
856 DT2 = FLOAT_SNAN64;
857 }
858
859 FLOAT_OP(cvtps, pw)
860 {
861 set_float_exception_flags(0, &env->fpu->fp_status);
862 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
863 FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
864 update_fcr31();
865 }
866 FLOAT_OP(cvtpw, ps)
867 {
868 set_float_exception_flags(0, &env->fpu->fp_status);
869 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
870 WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
871 update_fcr31();
872 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
873 WT2 = FLOAT_SNAN32;
874 }
875 FLOAT_OP(cvts, d)
876 {
877 set_float_exception_flags(0, &env->fpu->fp_status);
878 FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
879 update_fcr31();
880 }
881 FLOAT_OP(cvts, w)
882 {
883 set_float_exception_flags(0, &env->fpu->fp_status);
884 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
885 update_fcr31();
886 }
887 FLOAT_OP(cvts, l)
888 {
889 set_float_exception_flags(0, &env->fpu->fp_status);
890 FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
891 update_fcr31();
892 }
893 FLOAT_OP(cvts, pl)
894 {
895 set_float_exception_flags(0, &env->fpu->fp_status);
896 WT2 = WT0;
897 update_fcr31();
898 }
899 FLOAT_OP(cvts, pu)
900 {
901 set_float_exception_flags(0, &env->fpu->fp_status);
902 WT2 = WTH0;
903 update_fcr31();
904 }
905 FLOAT_OP(cvtw, s)
906 {
907 set_float_exception_flags(0, &env->fpu->fp_status);
908 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
909 update_fcr31();
910 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
911 WT2 = FLOAT_SNAN32;
912 }
913 FLOAT_OP(cvtw, d)
914 {
915 set_float_exception_flags(0, &env->fpu->fp_status);
916 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
917 update_fcr31();
918 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
919 WT2 = FLOAT_SNAN32;
920 }
921
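/* For the float-to-integer conversions above and the explicitly rounded
 * variants below, an Overflow or Invalid result is replaced by
 * 0x7fffffff resp. 0x7fffffffffffffffULL, the architected MIPS default
 * result for an unrepresentable conversion (the FLOAT_SNAN32/64
 * constants happen to have that bit pattern). E.g. trunc.w.d of 2^31
 * raises Invalid and returns 0x7fffffff. */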
922 FLOAT_OP(roundl, d)
923 {
924 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
925 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
926 RESTORE_ROUNDING_MODE;
927 update_fcr31();
928 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
929 DT2 = FLOAT_SNAN64;
930 }
931 FLOAT_OP(roundl, s)
932 {
933 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
934 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
935 RESTORE_ROUNDING_MODE;
936 update_fcr31();
937 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
938 DT2 = FLOAT_SNAN64;
939 }
940 FLOAT_OP(roundw, d)
941 {
942 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
943 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
944 RESTORE_ROUNDING_MODE;
945 update_fcr31();
946 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
947 WT2 = FLOAT_SNAN32;
948 }
949 FLOAT_OP(roundw, s)
950 {
951 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
952 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
953 RESTORE_ROUNDING_MODE;
954 update_fcr31();
955 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
956 WT2 = FLOAT_SNAN32;
957 }
958
959 FLOAT_OP(truncl, d)
960 {
961 DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
962 update_fcr31();
963 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
964 DT2 = FLOAT_SNAN64;
965 }
966 FLOAT_OP(truncl, s)
967 {
968 DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
969 update_fcr31();
970 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
971 DT2 = FLOAT_SNAN64;
972 }
973 FLOAT_OP(truncw, d)
974 {
975 WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
976 update_fcr31();
977 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
978 WT2 = FLOAT_SNAN32;
979 }
980 FLOAT_OP(truncw, s)
981 {
982 WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
983 update_fcr31();
984 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
985 WT2 = FLOAT_SNAN32;
986 }
987
988 FLOAT_OP(ceill, d)
989 {
990 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
991 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
992 RESTORE_ROUNDING_MODE;
993 update_fcr31();
994 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
995 DT2 = FLOAT_SNAN64;
996 }
997 FLOAT_OP(ceill, s)
998 {
999 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1000 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1001 RESTORE_ROUNDING_MODE;
1002 update_fcr31();
1003 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1004 DT2 = FLOAT_SNAN64;
1005 }
1006 FLOAT_OP(ceilw, d)
1007 {
1008 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1009 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1010 RESTORE_ROUNDING_MODE;
1011 update_fcr31();
1012 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1013 WT2 = FLOAT_SNAN32;
1014 }
1015 FLOAT_OP(ceilw, s)
1016 {
1017 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1018 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1019 RESTORE_ROUNDING_MODE;
1020 update_fcr31();
1021 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1022 WT2 = FLOAT_SNAN32;
1023 }
1024
1025 FLOAT_OP(floorl, d)
1026 {
1027 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1028 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1029 RESTORE_ROUNDING_MODE;
1030 update_fcr31();
1031 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1032 DT2 = FLOAT_SNAN64;
1033 }
1034 FLOAT_OP(floorl, s)
1035 {
1036 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1037 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1038 RESTORE_ROUNDING_MODE;
1039 update_fcr31();
1040 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1041 DT2 = FLOAT_SNAN64;
1042 }
1043 FLOAT_OP(floorw, d)
1044 {
1045 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1046 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1047 RESTORE_ROUNDING_MODE;
1048 update_fcr31();
1049 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1050 WT2 = FLOAT_SNAN32;
1051 }
1052 FLOAT_OP(floorw, s)
1053 {
1054 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1055 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1056 RESTORE_ROUNDING_MODE;
1057 update_fcr31();
1058 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1059 WT2 = FLOAT_SNAN32;
1060 }
1061
1062 /* MIPS specific unary operations */
1063 FLOAT_OP(recip, d)
1064 {
1065 set_float_exception_flags(0, &env->fpu->fp_status);
1066 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1067 update_fcr31();
1068 }
1069 FLOAT_OP(recip, s)
1070 {
1071 set_float_exception_flags(0, &env->fpu->fp_status);
1072 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1073 update_fcr31();
1074 }
1075
1076 FLOAT_OP(rsqrt, d)
1077 {
1078 set_float_exception_flags(0, &env->fpu->fp_status);
1079 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1080 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1081 update_fcr31();
1082 }
1083 FLOAT_OP(rsqrt, s)
1084 {
1085 set_float_exception_flags(0, &env->fpu->fp_status);
1086 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1087 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1088 update_fcr31();
1089 }
1090
1091 FLOAT_OP(recip1, d)
1092 {
1093 set_float_exception_flags(0, &env->fpu->fp_status);
1094 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1095 update_fcr31();
1096 }
1097 FLOAT_OP(recip1, s)
1098 {
1099 set_float_exception_flags(0, &env->fpu->fp_status);
1100 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1101 update_fcr31();
1102 }
1103 FLOAT_OP(recip1, ps)
1104 {
1105 set_float_exception_flags(0, &env->fpu->fp_status);
1106 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1107 FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
1108 update_fcr31();
1109 }
1110
1111 FLOAT_OP(rsqrt1, d)
1112 {
1113 set_float_exception_flags(0, &env->fpu->fp_status);
1114 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1115 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1116 update_fcr31();
1117 }
1118 FLOAT_OP(rsqrt1, s)
1119 {
1120 set_float_exception_flags(0, &env->fpu->fp_status);
1121 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1122 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1123 update_fcr31();
1124 }
1125 FLOAT_OP(rsqrt1, ps)
1126 {
1127 set_float_exception_flags(0, &env->fpu->fp_status);
1128 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1129 FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
1130 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1131 FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
1132 update_fcr31();
1133 }
1134
1135 /* binary operations */
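/* FLOAT_BINOP instantiates do_float_<op>_{d,s,ps} for add, sub, mul and
 * div; if the operation raises Invalid, the result is overwritten with
 * the default quiet NaN. */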
1136 #define FLOAT_BINOP(name) \
1137 FLOAT_OP(name, d) \
1138 { \
1139 set_float_exception_flags(0, &env->fpu->fp_status); \
1140 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
1141 update_fcr31(); \
1142 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1143 DT2 = FLOAT_QNAN64; \
1144 } \
1145 FLOAT_OP(name, s) \
1146 { \
1147 set_float_exception_flags(0, &env->fpu->fp_status); \
1148 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1149 update_fcr31(); \
1150 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1151 WT2 = FLOAT_QNAN32; \
1152 } \
1153 FLOAT_OP(name, ps) \
1154 { \
1155 set_float_exception_flags(0, &env->fpu->fp_status); \
1156 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1157 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
1158 update_fcr31(); \
1159 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
1160 WT2 = FLOAT_QNAN32; \
1161 WTH2 = FLOAT_QNAN32; \
1162 } \
1163 }
1164 FLOAT_BINOP(add)
1165 FLOAT_BINOP(sub)
1166 FLOAT_BINOP(mul)
1167 FLOAT_BINOP(div)
1168 #undef FLOAT_BINOP
1169
1170 /* MIPS specific binary operations */
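/* RECIP2 and RSQRT2 are the MIPS-3D iteration steps: they compute
 * -(a * b - 1) resp. -(a * b - 1) / 2, used to refine the seed values
 * produced by RECIP1/RSQRT1 above. */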
1171 FLOAT_OP(recip2, d)
1172 {
1173 set_float_exception_flags(0, &env->fpu->fp_status);
1174 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
1175 FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
1176 update_fcr31();
1177 }
1178 FLOAT_OP(recip2, s)
1179 {
1180 set_float_exception_flags(0, &env->fpu->fp_status);
1181 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1182 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
1183 update_fcr31();
1184 }
1185 FLOAT_OP(recip2, ps)
1186 {
1187 set_float_exception_flags(0, &env->fpu->fp_status);
1188 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1189 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
1190 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
1191 FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
1192 update_fcr31();
1193 }
1194
1195 FLOAT_OP(rsqrt2, d)
1196 {
1197 set_float_exception_flags(0, &env->fpu->fp_status);
1198 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
1199 FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
1200 FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
1201 update_fcr31();
1202 }
1203 FLOAT_OP(rsqrt2, s)
1204 {
1205 set_float_exception_flags(0, &env->fpu->fp_status);
1206 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1207 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
1208 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
1209 update_fcr31();
1210 }
1211 FLOAT_OP(rsqrt2, ps)
1212 {
1213 set_float_exception_flags(0, &env->fpu->fp_status);
1214 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1215 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
1216 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
1217 FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
1218 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
1219 FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
1220 update_fcr31();
1221 }
1222
1223 FLOAT_OP(addr, ps)
1224 {
1225 set_float_exception_flags(0, &env->fpu->fp_status);
1226 FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
1227 FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
1228 update_fcr31();
1229 }
1230
1231 FLOAT_OP(mulr, ps)
1232 {
1233 set_float_exception_flags(0, &env->fpu->fp_status);
1234 FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
1235 FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
1236 update_fcr31();
1237 }
1238
1239 /* compare operations */
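/* Each FOP_COND_* expansion provides both the c.<cond>.<fmt> compare
 * (do_cmp_*) and the MIPS-3D cabs.<cond>.<fmt> variant (do_cmpabs_*),
 * which compares absolute values. The result is written to condition
 * code "cc", and additionally to "cc + 1" for the upper half of a
 * paired-single operand. */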
1240 #define FOP_COND_D(op, cond) \
1241 void do_cmp_d_ ## op (long cc) \
1242 { \
1243 int c = cond; \
1244 update_fcr31(); \
1245 if (c) \
1246 SET_FP_COND(cc, env->fpu); \
1247 else \
1248 CLEAR_FP_COND(cc, env->fpu); \
1249 } \
1250 void do_cmpabs_d_ ## op (long cc) \
1251 { \
1252 int c; \
1253 FDT0 = float64_abs(FDT0); \
1254 FDT1 = float64_abs(FDT1); \
1255 c = cond; \
1256 update_fcr31(); \
1257 if (c) \
1258 SET_FP_COND(cc, env->fpu); \
1259 else \
1260 CLEAR_FP_COND(cc, env->fpu); \
1261 }
1262
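/* The "sig" argument selects signalling compare semantics: when set,
 * any NaN operand raises the Invalid exception; otherwise only
 * signalling NaNs do. */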
1263 int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
1264 {
1265 if (float64_is_signaling_nan(a) ||
1266 float64_is_signaling_nan(b) ||
1267 (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
1268 float_raise(float_flag_invalid, status);
1269 return 1;
1270 } else if (float64_is_nan(a) || float64_is_nan(b)) {
1271 return 1;
1272 } else {
1273 return 0;
1274 }
1275 }
1276
1277 /* NOTE: the comma operator makes "cond" evaluate to false,
1278  * but float*_is_unordered() is still called so its exception flags are raised. */
1279 FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
1280 FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
1281 FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1282 FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1283 FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1284 FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1285 FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
1286 FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
1287 /* NOTE: the comma operator makes "cond" evaluate to false,
1288  * but float*_is_unordered() is still called so its exception flags are raised. */
1289 FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
1290 FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
1291 FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1292 FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1293 FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1294 FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1295 FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
1296 FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
1297
1298 #define FOP_COND_S(op, cond) \
1299 void do_cmp_s_ ## op (long cc) \
1300 { \
1301 int c = cond; \
1302 update_fcr31(); \
1303 if (c) \
1304 SET_FP_COND(cc, env->fpu); \
1305 else \
1306 CLEAR_FP_COND(cc, env->fpu); \
1307 } \
1308 void do_cmpabs_s_ ## op (long cc) \
1309 { \
1310 int c; \
1311 FST0 = float32_abs(FST0); \
1312 FST1 = float32_abs(FST1); \
1313 c = cond; \
1314 update_fcr31(); \
1315 if (c) \
1316 SET_FP_COND(cc, env->fpu); \
1317 else \
1318 CLEAR_FP_COND(cc, env->fpu); \
1319 }
1320
1321 flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
1322 {
1323 if (float32_is_signaling_nan(a) ||
1324 float32_is_signaling_nan(b) ||
1325 (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
1326 float_raise(float_flag_invalid, status);
1327 return 1;
1328 } else if (float32_is_nan(a) || float32_is_nan(b)) {
1329 return 1;
1330 } else {
1331 return 0;
1332 }
1333 }
1334
1335 /* NOTE: the comma operator makes "cond" evaluate to false,
1336  * but float*_is_unordered() is still called so its exception flags are raised. */
1337 FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
1338 FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
1339 FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
1340 FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
1341 FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
1342 FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
1343 FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
1344 FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
1345 /* NOTE: the comma operator makes "cond" evaluate to false,
1346  * but float*_is_unordered() is still called so its exception flags are raised. */
1347 FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
1348 FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
1349 FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
1350 FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
1351 FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
1352 FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
1353 FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
1354 FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
1355
1356 #define FOP_COND_PS(op, condl, condh) \
1357 void do_cmp_ps_ ## op (long cc) \
1358 { \
1359 int cl = condl; \
1360 int ch = condh; \
1361 update_fcr31(); \
1362 if (cl) \
1363 SET_FP_COND(cc, env->fpu); \
1364 else \
1365 CLEAR_FP_COND(cc, env->fpu); \
1366 if (ch) \
1367 SET_FP_COND(cc + 1, env->fpu); \
1368 else \
1369 CLEAR_FP_COND(cc + 1, env->fpu); \
1370 } \
1371 void do_cmpabs_ps_ ## op (long cc) \
1372 { \
1373 int cl, ch; \
1374 FST0 = float32_abs(FST0); \
1375 FSTH0 = float32_abs(FSTH0); \
1376 FST1 = float32_abs(FST1); \
1377 FSTH1 = float32_abs(FSTH1); \
1378 cl = condl; \
1379 ch = condh; \
1380 update_fcr31(); \
1381 if (cl) \
1382 SET_FP_COND(cc, env->fpu); \
1383 else \
1384 CLEAR_FP_COND(cc, env->fpu); \
1385 if (ch) \
1386 SET_FP_COND(cc + 1, env->fpu); \
1387 else \
1388 CLEAR_FP_COND(cc + 1, env->fpu); \
1389 }
1390
1391 /* NOTE: the comma operator makes "cond" evaluate to false,
1392  * but float*_is_unordered() is still called so its exception flags are raised. */
1393 FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
1394 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
1395 FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
1396 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
1397 FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
1398 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1399 FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
1400 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1401 FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
1402 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1403 FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
1404 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1405 FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
1406 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1407 FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
1408 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1409 /* NOTE: the comma operator makes "cond" evaluate to false,
1410  * but float*_is_unordered() is still called so its exception flags are raised. */
1411 FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
1412 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
1413 FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
1414 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
1415 FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
1416 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1417 FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
1418 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1419 FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
1420 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1421 FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
1422 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1423 FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
1424 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1425 FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
1426 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))