1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdlib.h>
21 #include "exec.h"
22
23 #include "host-utils.h"
24
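/* GETPC() returns the host return address of the current helper call.
   It is used to locate the translation block that invoked the helper so
   the guest CPU state can be restored before an exception is raised.
   On s390 the top bit is masked off, presumably because only 31 address
   bits are significant there. */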
25 #ifdef __s390__
26 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
27 #else
28 # define GETPC() (__builtin_return_address(0))
29 #endif
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
34 void do_raise_exception_err (uint32_t exception, int error_code)
35 {
36 #if 1
37 if (logfile && exception < 0x100)
38 fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
39 #endif
40 env->exception_index = exception;
41 env->error_code = error_code;
42 T0 = 0;
43 cpu_loop_exit();
44 }
45
46 void do_raise_exception (uint32_t exception)
47 {
48 do_raise_exception_err(exception, 0);
49 }
50
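/* Restore the guest CPU state from a host return address inside the
   translated code, so the exception is attributed to the correct guest
   instruction. */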
51 void do_restore_state (void *pc_ptr)
52 {
53 TranslationBlock *tb;
54 unsigned long pc = (unsigned long) pc_ptr;
55
56 tb = tb_find_pc (pc);
57 if (tb) {
58 cpu_restore_state (tb, env, pc, NULL);
59 }
60 }
61
62 void do_raise_exception_direct_err (uint32_t exception, int error_code)
63 {
64 do_restore_state (GETPC ());
65 do_raise_exception_err (exception, error_code);
66 }
67
68 void do_raise_exception_direct (uint32_t exception)
69 {
70 do_raise_exception_direct_err (exception, 0);
71 }
72
73 #if defined(TARGET_MIPS64)
74 #if TARGET_LONG_BITS > HOST_LONG_BITS
75 /* These might call libgcc functions. */
76 void do_dsll (void)
77 {
78 T0 = T0 << T1;
79 }
80
81 void do_dsll32 (void)
82 {
83 T0 = T0 << (T1 + 32);
84 }
85
86 void do_dsra (void)
87 {
88 T0 = (int64_t)T0 >> T1;
89 }
90
91 void do_dsra32 (void)
92 {
93 T0 = (int64_t)T0 >> (T1 + 32);
94 }
95
96 void do_dsrl (void)
97 {
98 T0 = T0 >> T1;
99 }
100
101 void do_dsrl32 (void)
102 {
103 T0 = T0 >> (T1 + 32);
104 }
105
106 void do_drotr (void)
107 {
108 target_ulong tmp;
109
110 if (T1) {
111 tmp = T0 << (0x40 - T1);
112 T0 = (T0 >> T1) | tmp;
113 }
114 }
115
116 void do_drotr32 (void)
117 {
118 target_ulong tmp;
119
120 tmp = T0 << (0x40 - (32 + T1));
121 T0 = (T0 >> (32 + T1)) | tmp;
122 }
123
124 void do_dsllv (void)
125 {
126 T0 = T1 << (T0 & 0x3F);
127 }
128
129 void do_dsrav (void)
130 {
131 T0 = (int64_t)T1 >> (T0 & 0x3F);
132 }
133
134 void do_dsrlv (void)
135 {
136 T0 = T1 >> (T0 & 0x3F);
137 }
138
139 void do_drotrv (void)
140 {
141 target_ulong tmp;
142
143 T0 &= 0x3F;
144 if (T0) {
145 tmp = T1 << (0x40 - T0);
146 T0 = (T1 >> T0) | tmp;
147 } else
148 T0 = T1;
149 }
150
151 void do_dclo (void)
152 {
153 T0 = clo64(T0);
154 }
155
156 void do_dclz (void)
157 {
158 T0 = clz64(T0);
159 }
160
161 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
162 #endif /* TARGET_MIPS64 */
163
164 /* 64-bit arithmetic for 32-bit hosts */
165 #if TARGET_LONG_BITS > HOST_LONG_BITS
166 static always_inline uint64_t get_HILO (void)
167 {
168 return (env->HI[env->current_tc][0] << 32) | (uint32_t)env->LO[env->current_tc][0];
169 }
170
171 static always_inline void set_HILO (uint64_t HILO)
172 {
173 env->LO[env->current_tc][0] = (int32_t)HILO;
174 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
175 }
176
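/* Variants of set_HILO() that additionally return half of the result in
   T0: set_HIT0_LO() puts HI in T0, set_HI_LOT0() puts LO in T0. */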
177 static always_inline void set_HIT0_LO (uint64_t HILO)
178 {
179 env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
180 T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
181 }
182
183 static always_inline void set_HI_LOT0 (uint64_t HILO)
184 {
185 T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
186 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
187 }
188
189 void do_mult (void)
190 {
191 set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
192 }
193
194 void do_multu (void)
195 {
196 set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
197 }
198
199 void do_madd (void)
200 {
201 int64_t tmp;
202
203 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
204 set_HILO((int64_t)get_HILO() + tmp);
205 }
206
207 void do_maddu (void)
208 {
209 uint64_t tmp;
210
211 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
212 set_HILO(get_HILO() + tmp);
213 }
214
215 void do_msub (void)
216 {
217 int64_t tmp;
218
219 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
220 set_HILO((int64_t)get_HILO() - tmp);
221 }
222
223 void do_msubu (void)
224 {
225 uint64_t tmp;
226
227 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
228 set_HILO(get_HILO() - tmp);
229 }
230
231 /* Multiplication variants of the vr54xx. */
232 void do_muls (void)
233 {
234 set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
235 }
236
237 void do_mulsu (void)
238 {
239 set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
240 }
241
242 void do_macc (void)
243 {
244 set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
245 }
246
247 void do_macchi (void)
248 {
249 set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
250 }
251
252 void do_maccu (void)
253 {
254 set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
255 }
256
257 void do_macchiu (void)
258 {
259 set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
260 }
261
262 void do_msac (void)
263 {
264 set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
265 }
266
267 void do_msachi (void)
268 {
269 set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
270 }
271
272 void do_msacu (void)
273 {
274 set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
275 }
276
277 void do_msachiu (void)
278 {
279 set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
280 }
281
282 void do_mulhi (void)
283 {
284 set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
285 }
286
287 void do_mulhiu (void)
288 {
289 set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
290 }
291
292 void do_mulshi (void)
293 {
294 set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
295 }
296
297 void do_mulshiu (void)
298 {
299 set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
300 }
301 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
302
303 #if HOST_LONG_BITS < 64
304 void do_div (void)
305 {
306 /* Use 64-bit data types because the division may overflow/underflow. */
307 if (T1 != 0) {
308 env->LO[env->current_tc][0] = (int32_t)((int64_t)(int32_t)T0 / (int32_t)T1);
309 env->HI[env->current_tc][0] = (int32_t)((int64_t)(int32_t)T0 % (int32_t)T1);
310 }
311 }
312 #endif
313
314 #if defined(TARGET_MIPS64)
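/* The INT64_MIN / -1 case is handled separately: it would overflow in
   lldiv() (and can trap on the host), so the helper returns the dividend
   with a zero remainder instead. */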
315 void do_ddiv (void)
316 {
317 if (T1 != 0) {
318 int64_t arg0 = (int64_t)T0;
319 int64_t arg1 = (int64_t)T1;
320 if (arg0 == ((int64_t)-1 << 63) && arg1 == (int64_t)-1) {
321 env->LO[env->current_tc][0] = arg0;
322 env->HI[env->current_tc][0] = 0;
323 } else {
324 lldiv_t res = lldiv(arg0, arg1);
325 env->LO[env->current_tc][0] = res.quot;
326 env->HI[env->current_tc][0] = res.rem;
327 }
328 }
329 }
330
331 #if TARGET_LONG_BITS > HOST_LONG_BITS
332 void do_ddivu (void)
333 {
334 if (T1 != 0) {
335 env->LO[env->current_tc][0] = T0 / T1;
336 env->HI[env->current_tc][0] = T0 % T1;
337 }
338 }
339 #endif
340 #endif /* TARGET_MIPS64 */
341
342 #if defined(CONFIG_USER_ONLY)
343 void do_mfc0_random (void)
344 {
345 cpu_abort(env, "mfc0 random\n");
346 }
347
348 void do_mfc0_count (void)
349 {
350 cpu_abort(env, "mfc0 count\n");
351 }
352
353 void cpu_mips_store_count(CPUState *env, uint32_t value)
354 {
355 cpu_abort(env, "mtc0 count\n");
356 }
357
358 void cpu_mips_store_compare(CPUState *env, uint32_t value)
359 {
360 cpu_abort(env, "mtc0 compare\n");
361 }
362
363 void cpu_mips_start_count(CPUState *env)
364 {
365 cpu_abort(env, "start count\n");
366 }
367
368 void cpu_mips_stop_count(CPUState *env)
369 {
370 cpu_abort(env, "stop count\n");
371 }
372
373 void cpu_mips_update_irq(CPUState *env)
374 {
375 cpu_abort(env, "mtc0 status / mtc0 cause\n");
376 }
377
378 void do_mtc0_status_debug(uint32_t old, uint32_t val)
379 {
380 cpu_abort(env, "mtc0 status debug\n");
381 }
382
383 void do_mtc0_status_irqraise_debug (void)
384 {
385 cpu_abort(env, "mtc0 status irqraise debug\n");
386 }
387
388 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
389 {
390 cpu_abort(env, "mips_tlb_flush\n");
391 }
392
393 #else
394
395 /* CP0 helpers */
396 void do_mfc0_random (void)
397 {
398 T0 = (int32_t)cpu_mips_get_random(env);
399 }
400
401 void do_mfc0_count (void)
402 {
403 T0 = (int32_t)cpu_mips_get_count(env);
404 }
405
406 void do_mtc0_status_debug(uint32_t old, uint32_t val)
407 {
408 fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
409 old, old & env->CP0_Cause & CP0Ca_IP_mask,
410 val, val & env->CP0_Cause & CP0Ca_IP_mask,
411 env->CP0_Cause);
412 switch (env->hflags & MIPS_HFLAG_KSU) {
413 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
414 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
415 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
416 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
417 }
418 }
419
420 void do_mtc0_status_irqraise_debug(void)
421 {
422 fprintf(logfile, "Raise pending IRQs\n");
423 }
424
425 void fpu_handle_exception(void)
426 {
427 #ifdef CONFIG_SOFTFLOAT
428 int flags = get_float_exception_flags(&env->fpu->fp_status);
429 unsigned int cpuflags = 0, enable, cause = 0;
430
431 enable = GET_FP_ENABLE(env->fpu->fcr31);
432
433 /* determine current flags */
434 if (flags & float_flag_invalid) {
435 cpuflags |= FP_INVALID;
436 cause |= FP_INVALID & enable;
437 }
438 if (flags & float_flag_divbyzero) {
439 cpuflags |= FP_DIV0;
440 cause |= FP_DIV0 & enable;
441 }
442 if (flags & float_flag_overflow) {
443 cpuflags |= FP_OVERFLOW;
444 cause |= FP_OVERFLOW & enable;
445 }
446 if (flags & float_flag_underflow) {
447 cpuflags |= FP_UNDERFLOW;
448 cause |= FP_UNDERFLOW & enable;
449 }
450 if (flags & float_flag_inexact) {
451 cpuflags |= FP_INEXACT;
452 cause |= FP_INEXACT & enable;
453 }
454 SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
455 SET_FP_CAUSE(env->fpu->fcr31, cause);
456 #else
457 SET_FP_FLAGS(env->fpu->fcr31, 0);
458 SET_FP_CAUSE(env->fpu->fcr31, 0);
459 #endif
460 }
461
462 /* TLB management */
463 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
464 {
465 /* Flush qemu's TLB and discard all shadowed entries. */
466 tlb_flush (env, flush_global);
467 env->tlb->tlb_in_use = env->tlb->nb_tlb;
468 }
469
470 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
471 {
472 /* Discard entries from env->tlb[first] onwards. */
473 while (env->tlb->tlb_in_use > first) {
474 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
475 }
476 }
477
478 static void r4k_fill_tlb (int idx)
479 {
480 r4k_tlb_t *tlb;
481
482 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
483 tlb = &env->tlb->mmu.r4k.tlb[idx];
484 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
485 #if defined(TARGET_MIPS64)
486 tlb->VPN &= env->SEGMask;
487 #endif
488 tlb->ASID = env->CP0_EntryHi & 0xFF;
489 tlb->PageMask = env->CP0_PageMask;
490 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
491 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
492 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
493 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
494 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
495 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
496 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
497 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
498 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
499 }
500
501 void r4k_do_tlbwi (void)
502 {
503 /* Discard cached TLB entries. We could avoid doing this if the
504 tlbwi is just upgrading access permissions on the current entry;
505 that might be a further win. */
506 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
507
508 r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
509 r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
510 }
511
512 void r4k_do_tlbwr (void)
513 {
514 int r = cpu_mips_get_random(env);
515
516 r4k_invalidate_tlb(env, r, 1);
517 r4k_fill_tlb(r);
518 }
519
520 void r4k_do_tlbp (void)
521 {
522 r4k_tlb_t *tlb;
523 target_ulong mask;
524 target_ulong tag;
525 target_ulong VPN;
526 uint8_t ASID;
527 int i;
528
529 ASID = env->CP0_EntryHi & 0xFF;
530 for (i = 0; i < env->tlb->nb_tlb; i++) {
531 tlb = &env->tlb->mmu.r4k.tlb[i];
532 /* 1k pages are not supported. */
533 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
534 tag = env->CP0_EntryHi & ~mask;
535 VPN = tlb->VPN & ~mask;
536 /* Check ASID, virtual page number & size */
537 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
538 /* TLB match */
539 env->CP0_Index = i;
540 break;
541 }
542 }
543 if (i == env->tlb->nb_tlb) {
544 /* No match in the architected entries. Discard any matching shadow entries. */
545 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
546 tlb = &env->tlb->mmu.r4k.tlb[i];
547 /* 1k pages are not supported. */
548 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
549 tag = env->CP0_EntryHi & ~mask;
550 VPN = tlb->VPN & ~mask;
551 /* Check ASID, virtual page number & size */
552 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
553 r4k_mips_tlb_flush_extra (env, i);
554 break;
555 }
556 }
557
558 env->CP0_Index |= 0x80000000;
559 }
560 }
561
562 void r4k_do_tlbr (void)
563 {
564 r4k_tlb_t *tlb;
565 uint8_t ASID;
566
567 ASID = env->CP0_EntryHi & 0xFF;
568 tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
569
570 /* If this will change the current ASID, flush qemu's TLB. */
571 if (ASID != tlb->ASID)
572 cpu_mips_tlb_flush (env, 1);
573
574 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
575
576 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
577 env->CP0_PageMask = tlb->PageMask;
578 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
579 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
580 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
581 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
582 }
583
584 #endif /* !CONFIG_USER_ONLY */
585
586 void dump_ldst (const unsigned char *func)
587 {
588 if (loglevel)
589 fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", func, T0, T1);
590 }
591
592 void dump_sc (void)
593 {
594 if (loglevel) {
595 fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
596 T1, T0, env->CP0_LLAddr);
597 }
598 }
599
600 void debug_pre_eret (void)
601 {
602 fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
603 env->PC[env->current_tc], env->CP0_EPC);
604 if (env->CP0_Status & (1 << CP0St_ERL))
605 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
606 if (env->hflags & MIPS_HFLAG_DM)
607 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
608 fputs("\n", logfile);
609 }
610
611 void debug_post_eret (void)
612 {
613 fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
614 env->PC[env->current_tc], env->CP0_EPC);
615 if (env->CP0_Status & (1 << CP0St_ERL))
616 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
617 if (env->hflags & MIPS_HFLAG_DM)
618 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
619 switch (env->hflags & MIPS_HFLAG_KSU) {
620 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
621 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
622 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
623 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
624 }
625 }
626
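/* Minimal emulation of a few PMON firmware console calls. The argument
   is taken from $a0 (gpr[4]) and results are returned in $v0 (gpr[2]). */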
627 void do_pmon (int function)
628 {
629 function /= 2;
630 switch (function) {
631 case 2: /* TODO: char inbyte(int waitflag); */
632 if (env->gpr[env->current_tc][4] == 0)
633 env->gpr[env->current_tc][2] = -1;
634 /* Fall through */
635 case 11: /* TODO: char inbyte (void); */
636 env->gpr[env->current_tc][2] = -1;
637 break;
638 case 3:
639 case 12:
640 printf("%c", (char)(env->gpr[env->current_tc][4] & 0xFF));
641 break;
642 case 17:
643 break;
644 case 158:
645 {
646 unsigned char *fmt = (void *)(unsigned long)env->gpr[env->current_tc][4];
647 printf("%s", fmt);
648 }
649 break;
650 }
651 }
652
653 #if !defined(CONFIG_USER_ONLY)
654
655 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
656
657 #define MMUSUFFIX _mmu
658 #define ALIGNED_ONLY
659
660 #define SHIFT 0
661 #include "softmmu_template.h"
662
663 #define SHIFT 1
664 #include "softmmu_template.h"
665
666 #define SHIFT 2
667 #include "softmmu_template.h"
668
669 #define SHIFT 3
670 #include "softmmu_template.h"
671
672 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
673 {
674 env->CP0_BadVAddr = addr;
675 do_restore_state (retaddr);
676 do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
677 }
678
679 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
680 {
681 TranslationBlock *tb;
682 CPUState *saved_env;
683 unsigned long pc;
684 int ret;
685
686 /* XXX: hack to restore env in all cases, even if not called from
687 generated code */
688 saved_env = env;
689 env = cpu_single_env;
690 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
691 if (ret) {
692 if (retaddr) {
693 /* now we have a real cpu fault */
694 pc = (unsigned long)retaddr;
695 tb = tb_find_pc(pc);
696 if (tb) {
697 /* the PC is inside the translated code, which means we have
698 a virtual CPU fault */
699 cpu_restore_state(tb, env, pc, NULL);
700 }
701 }
702 do_raise_exception_err(env->exception_index, env->error_code);
703 }
704 env = saved_env;
705 }
706
707 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
708 int unused)
709 {
710 if (is_exec)
711 do_raise_exception(EXCP_IBE);
712 else
713 do_raise_exception(EXCP_DBE);
714 }
715 #endif
716
717 /* Complex FPU operations which may need stack space. */
718
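/* Raw IEEE bit patterns for 1.0 and 2.0 in single and double precision,
   plus the NaN patterns used as default results for invalid operations
   and failed conversions. */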
719 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
720 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
721 #define FLOAT_TWO32 make_float32(1 << 30)
722 #define FLOAT_TWO64 make_float64(1ULL << 62)
723 #define FLOAT_QNAN32 0x7fbfffff
724 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
725 #define FLOAT_SNAN32 0x7fffffff
726 #define FLOAT_SNAN64 0x7fffffffffffffffULL
727
728 /* Convert the MIPS rounding mode in FCR31 to the IEEE library's rounding mode. */
729 unsigned int ieee_rm[] = {
730 float_round_nearest_even,
731 float_round_to_zero,
732 float_round_up,
733 float_round_down
734 };
735
736 #define RESTORE_ROUNDING_MODE \
737 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
738
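/* cfc1/ctc1 access the FPU control registers: reg 0 is FIR, 25 is FCCR
   (condition codes), 26 is FEXR (cause/flag bits), 28 is FENR (enables
   and rounding mode) and 31 is the full FCSR. */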
739 void do_cfc1 (int reg)
740 {
741 switch (reg) {
742 case 0:
743 T0 = (int32_t)env->fpu->fcr0;
744 break;
745 case 25:
746 T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
747 break;
748 case 26:
749 T0 = env->fpu->fcr31 & 0x0003f07c;
750 break;
751 case 28:
752 T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
753 break;
754 default:
755 T0 = (int32_t)env->fpu->fcr31;
756 break;
757 }
758 }
759
760 void do_ctc1 (int reg)
761 {
762 switch(reg) {
763 case 25:
764 if (T0 & 0xffffff00)
765 return;
766 env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
767 ((T0 & 0x1) << 23);
768 break;
769 case 26:
770 if (T0 & 0x007c0000)
771 return;
772 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
773 break;
774 case 28:
775 if (T0 & 0x007c0000)
776 return;
777 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
778 ((T0 & 0x4) << 22);
779 break;
780 case 31:
781 if (T0 & 0x007c0000)
782 return;
783 env->fpu->fcr31 = T0;
784 break;
785 default:
786 return;
787 }
788 /* set rounding mode */
789 RESTORE_ROUNDING_MODE;
790 set_float_exception_flags(0, &env->fpu->fp_status);
791 if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
792 do_raise_exception(EXCP_FPE);
793 }
794
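/* Translate between the softfloat exception flag bits and the MIPS
   FCR31 cause/flag bit layout. */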
795 static always_inline char ieee_ex_to_mips(char xcpt)
796 {
797 return (xcpt & float_flag_inexact) >> 5 |
798 (xcpt & float_flag_underflow) >> 3 |
799 (xcpt & float_flag_overflow) >> 1 |
800 (xcpt & float_flag_divbyzero) << 1 |
801 (xcpt & float_flag_invalid) << 4;
802 }
803
804 static always_inline char mips_ex_to_ieee(char xcpt)
805 {
806 return (xcpt & FP_INEXACT) << 5 |
807 (xcpt & FP_UNDERFLOW) << 3 |
808 (xcpt & FP_OVERFLOW) << 1 |
809 (xcpt & FP_DIV0) >> 1 |
810 (xcpt & FP_INVALID) >> 4;
811 }
812
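/* Fold the accumulated IEEE exception flags into FCR31: set the cause
   bits, raise an FP exception if any enabled exception occurred,
   otherwise update the sticky flag bits. */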
813 static always_inline void update_fcr31(void)
814 {
815 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));
816
817 SET_FP_CAUSE(env->fpu->fcr31, tmp);
818 if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
819 do_raise_exception(EXCP_FPE);
820 else
821 UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
822 }
823
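/* FLOAT_OP(name, fmt) declares the helper do_float_<name>_<fmt>;
   fmt names the operand format (s, d, w, l, ps, ...). */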
824 #define FLOAT_OP(name, p) void do_float_##name##_##p(void)
825
826 FLOAT_OP(cvtd, s)
827 {
828 set_float_exception_flags(0, &env->fpu->fp_status);
829 FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
830 update_fcr31();
831 }
832 FLOAT_OP(cvtd, w)
833 {
834 set_float_exception_flags(0, &env->fpu->fp_status);
835 FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
836 update_fcr31();
837 }
838 FLOAT_OP(cvtd, l)
839 {
840 set_float_exception_flags(0, &env->fpu->fp_status);
841 FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
842 update_fcr31();
843 }
844 FLOAT_OP(cvtl, d)
845 {
846 set_float_exception_flags(0, &env->fpu->fp_status);
847 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
848 update_fcr31();
849 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
850 DT2 = FLOAT_SNAN64;
851 }
852 FLOAT_OP(cvtl, s)
853 {
854 set_float_exception_flags(0, &env->fpu->fp_status);
855 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
856 update_fcr31();
857 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
858 DT2 = FLOAT_SNAN64;
859 }
860
861 FLOAT_OP(cvtps, pw)
862 {
863 set_float_exception_flags(0, &env->fpu->fp_status);
864 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
865 FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
866 update_fcr31();
867 }
868 FLOAT_OP(cvtpw, ps)
869 {
870 set_float_exception_flags(0, &env->fpu->fp_status);
871 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
872 WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
873 update_fcr31();
874 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
875 WT2 = FLOAT_SNAN32;
876 }
877 FLOAT_OP(cvts, d)
878 {
879 set_float_exception_flags(0, &env->fpu->fp_status);
880 FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
881 update_fcr31();
882 }
883 FLOAT_OP(cvts, w)
884 {
885 set_float_exception_flags(0, &env->fpu->fp_status);
886 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
887 update_fcr31();
888 }
889 FLOAT_OP(cvts, l)
890 {
891 set_float_exception_flags(0, &env->fpu->fp_status);
892 FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
893 update_fcr31();
894 }
895 FLOAT_OP(cvts, pl)
896 {
897 set_float_exception_flags(0, &env->fpu->fp_status);
898 WT2 = WT0;
899 update_fcr31();
900 }
901 FLOAT_OP(cvts, pu)
902 {
903 set_float_exception_flags(0, &env->fpu->fp_status);
904 WT2 = WTH0;
905 update_fcr31();
906 }
907 FLOAT_OP(cvtw, s)
908 {
909 set_float_exception_flags(0, &env->fpu->fp_status);
910 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
911 update_fcr31();
912 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
913 WT2 = FLOAT_SNAN32;
914 }
915 FLOAT_OP(cvtw, d)
916 {
917 set_float_exception_flags(0, &env->fpu->fp_status);
918 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
919 update_fcr31();
920 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
921 WT2 = FLOAT_SNAN32;
922 }
923
924 FLOAT_OP(roundl, d)
925 {
926 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
927 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
928 RESTORE_ROUNDING_MODE;
929 update_fcr31();
930 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
931 DT2 = FLOAT_SNAN64;
932 }
933 FLOAT_OP(roundl, s)
934 {
935 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
936 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
937 RESTORE_ROUNDING_MODE;
938 update_fcr31();
939 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
940 DT2 = FLOAT_SNAN64;
941 }
942 FLOAT_OP(roundw, d)
943 {
944 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
945 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
946 RESTORE_ROUNDING_MODE;
947 update_fcr31();
948 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
949 WT2 = FLOAT_SNAN32;
950 }
951 FLOAT_OP(roundw, s)
952 {
953 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
954 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
955 RESTORE_ROUNDING_MODE;
956 update_fcr31();
957 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
958 WT2 = FLOAT_SNAN32;
959 }
960
961 FLOAT_OP(truncl, d)
962 {
963 DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
964 update_fcr31();
965 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
966 DT2 = FLOAT_SNAN64;
967 }
968 FLOAT_OP(truncl, s)
969 {
970 DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
971 update_fcr31();
972 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
973 DT2 = FLOAT_SNAN64;
974 }
975 FLOAT_OP(truncw, d)
976 {
977 WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
978 update_fcr31();
979 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
980 WT2 = FLOAT_SNAN32;
981 }
982 FLOAT_OP(truncw, s)
983 {
984 WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
985 update_fcr31();
986 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
987 WT2 = FLOAT_SNAN32;
988 }
989
990 FLOAT_OP(ceill, d)
991 {
992 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
993 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
994 RESTORE_ROUNDING_MODE;
995 update_fcr31();
996 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
997 DT2 = FLOAT_SNAN64;
998 }
999 FLOAT_OP(ceill, s)
1000 {
1001 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1002 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1003 RESTORE_ROUNDING_MODE;
1004 update_fcr31();
1005 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1006 DT2 = FLOAT_SNAN64;
1007 }
1008 FLOAT_OP(ceilw, d)
1009 {
1010 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1011 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1012 RESTORE_ROUNDING_MODE;
1013 update_fcr31();
1014 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1015 WT2 = FLOAT_SNAN32;
1016 }
1017 FLOAT_OP(ceilw, s)
1018 {
1019 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1020 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1021 RESTORE_ROUNDING_MODE;
1022 update_fcr31();
1023 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1024 WT2 = FLOAT_SNAN32;
1025 }
1026
1027 FLOAT_OP(floorl, d)
1028 {
1029 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1030 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1031 RESTORE_ROUNDING_MODE;
1032 update_fcr31();
1033 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1034 DT2 = FLOAT_SNAN64;
1035 }
1036 FLOAT_OP(floorl, s)
1037 {
1038 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1039 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1040 RESTORE_ROUNDING_MODE;
1041 update_fcr31();
1042 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1043 DT2 = FLOAT_SNAN64;
1044 }
1045 FLOAT_OP(floorw, d)
1046 {
1047 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1048 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1049 RESTORE_ROUNDING_MODE;
1050 update_fcr31();
1051 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1052 WT2 = FLOAT_SNAN32;
1053 }
1054 FLOAT_OP(floorw, s)
1055 {
1056 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1057 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1058 RESTORE_ROUNDING_MODE;
1059 update_fcr31();
1060 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1061 WT2 = FLOAT_SNAN32;
1062 }
1063
1064 /* MIPS specific unary operations */
1065 FLOAT_OP(recip, d)
1066 {
1067 set_float_exception_flags(0, &env->fpu->fp_status);
1068 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1069 update_fcr31();
1070 }
1071 FLOAT_OP(recip, s)
1072 {
1073 set_float_exception_flags(0, &env->fpu->fp_status);
1074 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1075 update_fcr31();
1076 }
1077
1078 FLOAT_OP(rsqrt, d)
1079 {
1080 set_float_exception_flags(0, &env->fpu->fp_status);
1081 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1082 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1083 update_fcr31();
1084 }
1085 FLOAT_OP(rsqrt, s)
1086 {
1087 set_float_exception_flags(0, &env->fpu->fp_status);
1088 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1089 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1090 update_fcr31();
1091 }
1092
1093 FLOAT_OP(recip1, d)
1094 {
1095 set_float_exception_flags(0, &env->fpu->fp_status);
1096 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1097 update_fcr31();
1098 }
1099 FLOAT_OP(recip1, s)
1100 {
1101 set_float_exception_flags(0, &env->fpu->fp_status);
1102 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1103 update_fcr31();
1104 }
1105 FLOAT_OP(recip1, ps)
1106 {
1107 set_float_exception_flags(0, &env->fpu->fp_status);
1108 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1109 FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
1110 update_fcr31();
1111 }
1112
1113 FLOAT_OP(rsqrt1, d)
1114 {
1115 set_float_exception_flags(0, &env->fpu->fp_status);
1116 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1117 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1118 update_fcr31();
1119 }
1120 FLOAT_OP(rsqrt1, s)
1121 {
1122 set_float_exception_flags(0, &env->fpu->fp_status);
1123 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1124 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1125 update_fcr31();
1126 }
1127 FLOAT_OP(rsqrt1, ps)
1128 {
1129 set_float_exception_flags(0, &env->fpu->fp_status);
1130 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1131 FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
1132 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1133 FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
1134 update_fcr31();
1135 }
1136
1137 /* binary operations */
1138 #define FLOAT_BINOP(name) \
1139 FLOAT_OP(name, d) \
1140 { \
1141 set_float_exception_flags(0, &env->fpu->fp_status); \
1142 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
1143 update_fcr31(); \
1144 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1145 DT2 = FLOAT_QNAN64; \
1146 } \
1147 FLOAT_OP(name, s) \
1148 { \
1149 set_float_exception_flags(0, &env->fpu->fp_status); \
1150 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1151 update_fcr31(); \
1152 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1153 WT2 = FLOAT_QNAN32; \
1154 } \
1155 FLOAT_OP(name, ps) \
1156 { \
1157 set_float_exception_flags(0, &env->fpu->fp_status); \
1158 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1159 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
1160 update_fcr31(); \
1161 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
1162 WT2 = FLOAT_QNAN32; \
1163 WTH2 = FLOAT_QNAN32; \
1164 } \
1165 }
1166 FLOAT_BINOP(add)
1167 FLOAT_BINOP(sub)
1168 FLOAT_BINOP(mul)
1169 FLOAT_BINOP(div)
1170 #undef FLOAT_BINOP
1171
1172 /* MIPS specific binary operations */
1173 FLOAT_OP(recip2, d)
1174 {
1175 set_float_exception_flags(0, &env->fpu->fp_status);
1176 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
1177 FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
1178 update_fcr31();
1179 }
1180 FLOAT_OP(recip2, s)
1181 {
1182 set_float_exception_flags(0, &env->fpu->fp_status);
1183 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1184 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
1185 update_fcr31();
1186 }
1187 FLOAT_OP(recip2, ps)
1188 {
1189 set_float_exception_flags(0, &env->fpu->fp_status);
1190 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1191 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
1192 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
1193 FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
1194 update_fcr31();
1195 }
1196
1197 FLOAT_OP(rsqrt2, d)
1198 {
1199 set_float_exception_flags(0, &env->fpu->fp_status);
1200 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
1201 FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
1202 FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
1203 update_fcr31();
1204 }
1205 FLOAT_OP(rsqrt2, s)
1206 {
1207 set_float_exception_flags(0, &env->fpu->fp_status);
1208 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1209 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
1210 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
1211 update_fcr31();
1212 }
1213 FLOAT_OP(rsqrt2, ps)
1214 {
1215 set_float_exception_flags(0, &env->fpu->fp_status);
1216 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
1217 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
1218 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
1219 FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
1220 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
1221 FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
1222 update_fcr31();
1223 }
1224
1225 FLOAT_OP(addr, ps)
1226 {
1227 set_float_exception_flags(0, &env->fpu->fp_status);
1228 FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
1229 FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
1230 update_fcr31();
1231 }
1232
1233 FLOAT_OP(mulr, ps)
1234 {
1235 set_float_exception_flags(0, &env->fpu->fp_status);
1236 FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
1237 FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
1238 update_fcr31();
1239 }
1240
1241 /* compare operations */
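/* Each FOP_COND_* macro expands to a plain compare and a compare on
   absolute values; cc selects the FP condition code bit(s) to set or
   clear. */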
1242 #define FOP_COND_D(op, cond) \
1243 void do_cmp_d_ ## op (long cc) \
1244 { \
1245 int c = cond; \
1246 update_fcr31(); \
1247 if (c) \
1248 SET_FP_COND(cc, env->fpu); \
1249 else \
1250 CLEAR_FP_COND(cc, env->fpu); \
1251 } \
1252 void do_cmpabs_d_ ## op (long cc) \
1253 { \
1254 int c; \
1255 FDT0 = float64_abs(FDT0); \
1256 FDT1 = float64_abs(FDT1); \
1257 c = cond; \
1258 update_fcr31(); \
1259 if (c) \
1260 SET_FP_COND(cc, env->fpu); \
1261 else \
1262 CLEAR_FP_COND(cc, env->fpu); \
1263 }
1264
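/* Return 1 if either operand is a NaN. Signalling NaNs always raise the
   invalid exception; quiet NaNs raise it only for the signalling
   compares (sig != 0). */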
1265 int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
1266 {
1267 if (float64_is_signaling_nan(a) ||
1268 float64_is_signaling_nan(b) ||
1269 (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
1270 float_raise(float_flag_invalid, status);
1271 return 1;
1272 } else if (float64_is_nan(a) || float64_is_nan(b)) {
1273 return 1;
1274 } else {
1275 return 0;
1276 }
1277 }
1278
1279 /* NOTE: the comma operator makes "cond" evaluate to false,
1280 * but float*_is_unordered() is still called. */
1281 FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
1282 FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
1283 FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1284 FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1285 FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1286 FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1287 FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
1288 FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
1289 /* NOTE: the comma operator makes "cond" evaluate to false,
1290 * but float*_is_unordered() is still called. */
1291 FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
1292 FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
1293 FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1294 FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
1295 FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1296 FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
1297 FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
1298 FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
1299
1300 #define FOP_COND_S(op, cond) \
1301 void do_cmp_s_ ## op (long cc) \
1302 { \
1303 int c = cond; \
1304 update_fcr31(); \
1305 if (c) \
1306 SET_FP_COND(cc, env->fpu); \
1307 else \
1308 CLEAR_FP_COND(cc, env->fpu); \
1309 } \
1310 void do_cmpabs_s_ ## op (long cc) \
1311 { \
1312 int c; \
1313 FST0 = float32_abs(FST0); \
1314 FST1 = float32_abs(FST1); \
1315 c = cond; \
1316 update_fcr31(); \
1317 if (c) \
1318 SET_FP_COND(cc, env->fpu); \
1319 else \
1320 CLEAR_FP_COND(cc, env->fpu); \
1321 }
1322
1323 flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
1324 {
1325 if (float32_is_signaling_nan(a) ||
1326 float32_is_signaling_nan(b) ||
1327 (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
1328 float_raise(float_flag_invalid, status);
1329 return 1;
1330 } else if (float32_is_nan(a) || float32_is_nan(b)) {
1331 return 1;
1332 } else {
1333 return 0;
1334 }
1335 }
1336
1337 /* NOTE: the comma operator makes "cond" evaluate to false,
1338 * but float*_is_unordered() is still called. */
1339 FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
1340 FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
1341 FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
1342 FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
1343 FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
1344 FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
1345 FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
1346 FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
1347 /* NOTE: the comma operator makes "cond" evaluate to false,
1348 * but float*_is_unordered() is still called. */
1349 FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
1350 FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
1351 FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
1352 FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
1353 FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
1354 FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
1355 FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
1356 FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
1357
1358 #define FOP_COND_PS(op, condl, condh) \
1359 void do_cmp_ps_ ## op (long cc) \
1360 { \
1361 int cl = condl; \
1362 int ch = condh; \
1363 update_fcr31(); \
1364 if (cl) \
1365 SET_FP_COND(cc, env->fpu); \
1366 else \
1367 CLEAR_FP_COND(cc, env->fpu); \
1368 if (ch) \
1369 SET_FP_COND(cc + 1, env->fpu); \
1370 else \
1371 CLEAR_FP_COND(cc + 1, env->fpu); \
1372 } \
1373 void do_cmpabs_ps_ ## op (long cc) \
1374 { \
1375 int cl, ch; \
1376 FST0 = float32_abs(FST0); \
1377 FSTH0 = float32_abs(FSTH0); \
1378 FST1 = float32_abs(FST1); \
1379 FSTH1 = float32_abs(FSTH1); \
1380 cl = condl; \
1381 ch = condh; \
1382 update_fcr31(); \
1383 if (cl) \
1384 SET_FP_COND(cc, env->fpu); \
1385 else \
1386 CLEAR_FP_COND(cc, env->fpu); \
1387 if (ch) \
1388 SET_FP_COND(cc + 1, env->fpu); \
1389 else \
1390 CLEAR_FP_COND(cc + 1, env->fpu); \
1391 }
1392
1393 /* NOTE: the comma operator makes "cond" evaluate to false,
1394 * but float*_is_unordered() is still called. */
1395 FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
1396 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
1397 FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
1398 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
1399 FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
1400 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1401 FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
1402 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1403 FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
1404 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1405 FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
1406 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1407 FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
1408 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1409 FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
1410 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1411 /* NOTE: the comma operator makes "cond" evaluate to false,
1412 * but float*_is_unordered() is still called. */
1413 FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
1414 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
1415 FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
1416 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
1417 FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
1418 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1419 FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
1420 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
1421 FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
1422 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1423 FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
1424 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
1425 FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
1426 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
1427 FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
1428 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))