1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdlib.h>
21 #include "exec.h"
22
23 #include "host-utils.h"
24
25 /*****************************************************************************/
26 /* Exceptions processing helpers */
27
28 void do_raise_exception_err (uint32_t exception, int error_code)
29 {
30 #if 1
31 if (logfile && exception < 0x100)
32 fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
33 #endif
34 env->exception_index = exception;
35 env->error_code = error_code;
36 T0 = 0;
37 cpu_loop_exit();
38 }
39
40 void do_raise_exception (uint32_t exception)
41 {
42 do_raise_exception_err(exception, 0);
43 }
44
45 void do_interrupt_restart (void)
46 {
47 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
48 !(env->CP0_Status & (1 << CP0St_ERL)) &&
49 !(env->hflags & MIPS_HFLAG_DM) &&
50 (env->CP0_Status & (1 << CP0St_IE)) &&
51 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
52 env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
53 do_raise_exception(EXCP_EXT_INTERRUPT);
54 }
55 }
56
57 void do_restore_state (void *pc_ptr)
58 {
59 TranslationBlock *tb;
60 unsigned long pc = (unsigned long) pc_ptr;
61
62 tb = tb_find_pc (pc);
63 if (tb) {
64 cpu_restore_state (tb, env, pc, NULL);
65 }
66 }
67
68 void do_clo (void)
69 {
70 T0 = clo32(T0);
71 }
72
73 void do_clz (void)
74 {
75 T0 = clz32(T0);
76 }
77
78 #if defined(TARGET_MIPS64)
79 #if TARGET_LONG_BITS > HOST_LONG_BITS
80 /* These may call libgcc functions. */
81 void do_dsll (void)
82 {
83 T0 = T0 << T1;
84 }
85
86 void do_dsll32 (void)
87 {
88 T0 = T0 << (T1 + 32);
89 }
90
91 void do_dsra (void)
92 {
93 T0 = (int64_t)T0 >> T1;
94 }
95
96 void do_dsra32 (void)
97 {
98 T0 = (int64_t)T0 >> (T1 + 32);
99 }
100
101 void do_dsrl (void)
102 {
103 T0 = T0 >> T1;
104 }
105
106 void do_dsrl32 (void)
107 {
108 T0 = T0 >> (T1 + 32);
109 }
110
111 void do_drotr (void)
112 {
113 target_ulong tmp;
114
115 if (T1) {
116 tmp = T0 << (0x40 - T1);
117 T0 = (T0 >> T1) | tmp;
118 }
119 }
120
121 void do_drotr32 (void)
122 {
123 target_ulong tmp;
124
125 tmp = T0 << (0x40 - (32 + T1));
126 T0 = (T0 >> (32 + T1)) | tmp;
127 }
128
129 void do_dsllv (void)
130 {
131 T0 = T1 << (T0 & 0x3F);
132 }
133
134 void do_dsrav (void)
135 {
136 T0 = (int64_t)T1 >> (T0 & 0x3F);
137 }
138
139 void do_dsrlv (void)
140 {
141 T0 = T1 >> (T0 & 0x3F);
142 }
143
144 void do_drotrv (void)
145 {
146 target_ulong tmp;
147
148 T0 &= 0x3F;
149 if (T0) {
150 tmp = T1 << (0x40 - T0);
151 T0 = (T1 >> T0) | tmp;
152 } else
153 T0 = T1;
154 }
155
156 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
157
158 void do_dclo (void)
159 {
160 T0 = clo64(T0);
161 }
162
163 void do_dclz (void)
164 {
165 T0 = clz64(T0);
166 }
167
168 #endif /* TARGET_MIPS64 */
169
170 /* 64-bit arithmetic for 32-bit hosts */
171 #if TARGET_LONG_BITS > HOST_LONG_BITS
172 static always_inline uint64_t get_HILO (void)
173 {
174 return (env->HI[env->current_tc][0] << 32) | (uint32_t)env->LO[env->current_tc][0];
175 }
176
177 static always_inline void set_HILO (uint64_t HILO)
178 {
179 env->LO[env->current_tc][0] = (int32_t)HILO;
180 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
181 }
182
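     /* Note: set_HIT0_LO also leaves the high word of the product in T0,
        while set_HI_LOT0 leaves the low word in T0. */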
183 static always_inline void set_HIT0_LO (uint64_t HILO)
184 {
185 env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
186 T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
187 }
188
189 static always_inline void set_HI_LOT0 (uint64_t HILO)
190 {
191 T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
192 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
193 }
194
195 void do_mult (void)
196 {
197 set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
198 }
199
200 void do_multu (void)
201 {
202 set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
203 }
204
205 void do_madd (void)
206 {
207 int64_t tmp;
208
209 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
210 set_HILO((int64_t)get_HILO() + tmp);
211 }
212
213 void do_maddu (void)
214 {
215 uint64_t tmp;
216
217 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
218 set_HILO(get_HILO() + tmp);
219 }
220
221 void do_msub (void)
222 {
223 int64_t tmp;
224
225 tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
226 set_HILO((int64_t)get_HILO() - tmp);
227 }
228
229 void do_msubu (void)
230 {
231 uint64_t tmp;
232
233 tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
234 set_HILO(get_HILO() - tmp);
235 }
236
237 /* Multiplication variants of the vr54xx. */
238 void do_muls (void)
239 {
240 set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
241 }
242
243 void do_mulsu (void)
244 {
245 set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
246 }
247
248 void do_macc (void)
249 {
250 set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
251 }
252
253 void do_macchi (void)
254 {
255 set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
256 }
257
258 void do_maccu (void)
259 {
260 set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
261 }
262
263 void do_macchiu (void)
264 {
265 set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
266 }
267
268 void do_msac (void)
269 {
270 set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
271 }
272
273 void do_msachi (void)
274 {
275 set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
276 }
277
278 void do_msacu (void)
279 {
280 set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
281 }
282
283 void do_msachiu (void)
284 {
285 set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
286 }
287
288 void do_mulhi (void)
289 {
290 set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
291 }
292
293 void do_mulhiu (void)
294 {
295 set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
296 }
297
298 void do_mulshi (void)
299 {
300 set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
301 }
302
303 void do_mulshiu (void)
304 {
305 set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
306 }
307 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
308
309 #ifdef CONFIG_USER_ONLY
310 void do_mfc0_random (void)
311 {
312 cpu_abort(env, "mfc0 random\n");
313 }
314
315 void do_mfc0_count (void)
316 {
317 cpu_abort(env, "mfc0 count\n");
318 }
319
320 void cpu_mips_store_count(CPUState *env, uint32_t value)
321 {
322 cpu_abort(env, "mtc0 count\n");
323 }
324
325 void cpu_mips_store_compare(CPUState *env, uint32_t value)
326 {
327 cpu_abort(env, "mtc0 compare\n");
328 }
329
330 void cpu_mips_start_count(CPUState *env)
331 {
332 cpu_abort(env, "start count\n");
333 }
334
335 void cpu_mips_stop_count(CPUState *env)
336 {
337 cpu_abort(env, "stop count\n");
338 }
339
340 void cpu_mips_update_irq(CPUState *env)
341 {
342 cpu_abort(env, "mtc0 status / mtc0 cause\n");
343 }
344
345 void do_mtc0_status_debug(uint32_t old, uint32_t val)
346 {
347 cpu_abort(env, "mtc0 status debug\n");
348 }
349
350 void do_mtc0_status_irqraise_debug (void)
351 {
352 cpu_abort(env, "mtc0 status irqraise debug\n");
353 }
354
355 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
356 {
357 cpu_abort(env, "mips_tlb_flush\n");
358 }
359
360 #else
361
362 /* CP0 helpers */
363 void do_mfc0_mvpcontrol (void)
364 {
365 T0 = env->mvp->CP0_MVPControl;
366 }
367
368 void do_mfc0_mvpconf0 (void)
369 {
370 T0 = env->mvp->CP0_MVPConf0;
371 }
372
373 void do_mfc0_mvpconf1 (void)
374 {
375 T0 = env->mvp->CP0_MVPConf1;
376 }
377
378 void do_mfc0_random (void)
379 {
380 T0 = (int32_t)cpu_mips_get_random(env);
381 }
382
383 void do_mfc0_tcstatus (void)
384 {
385 T0 = env->CP0_TCStatus[env->current_tc];
386 }
387
388 void do_mftc0_tcstatus(void)
389 {
390 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
391
392 T0 = env->CP0_TCStatus[other_tc];
393 }
394
395 void do_mfc0_tcbind (void)
396 {
397 T0 = env->CP0_TCBind[env->current_tc];
398 }
399
400 void do_mftc0_tcbind(void)
401 {
402 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
403
404 T0 = env->CP0_TCBind[other_tc];
405 }
406
407 void do_mfc0_tcrestart (void)
408 {
409 T0 = env->PC[env->current_tc];
410 }
411
412 void do_mftc0_tcrestart(void)
413 {
414 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
415
416 T0 = env->PC[other_tc];
417 }
418
419 void do_mfc0_tchalt (void)
420 {
421 T0 = env->CP0_TCHalt[env->current_tc];
422 }
423
424 void do_mftc0_tchalt(void)
425 {
426 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
427
428 T0 = env->CP0_TCHalt[other_tc];
429 }
430
431 void do_mfc0_tccontext (void)
432 {
433 T0 = env->CP0_TCContext[env->current_tc];
434 }
435
436 void do_mftc0_tccontext(void)
437 {
438 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
439
440 T0 = env->CP0_TCContext[other_tc];
441 }
442
443 void do_mfc0_tcschedule (void)
444 {
445 T0 = env->CP0_TCSchedule[env->current_tc];
446 }
447
448 void do_mftc0_tcschedule(void)
449 {
450 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
451
452 T0 = env->CP0_TCSchedule[other_tc];
453 }
454
455 void do_mfc0_tcschefback (void)
456 {
457 T0 = env->CP0_TCScheFBack[env->current_tc];
458 }
459
460 void do_mftc0_tcschefback(void)
461 {
462 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
463
464 T0 = env->CP0_TCScheFBack[other_tc];
465 }
466
467 void do_mfc0_count (void)
468 {
469 T0 = (int32_t)cpu_mips_get_count(env);
470 }
471
472 void do_mftc0_entryhi(void)
473 {
474 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
475
476 T0 = (env->CP0_EntryHi & ~0xff) | (env->CP0_TCStatus[other_tc] & 0xff);
477 }
478
479 void do_mftc0_status(void)
480 {
481 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
482 uint32_t tcstatus = env->CP0_TCStatus[other_tc];
483
484 T0 = env->CP0_Status & ~0xf1000018;
485 T0 |= tcstatus & (0xf << CP0TCSt_TCU0);
486 T0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
487 T0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
488 }
489
490 void do_mfc0_lladdr (void)
491 {
492     T0 = (int32_t)(env->CP0_LLAddr >> 4);
493 }
494
495 void do_mfc0_watchlo (uint32_t sel)
496 {
497 T0 = (int32_t)env->CP0_WatchLo[sel];
498 }
499
500 void do_mfc0_watchhi (uint32_t sel)
501 {
502 T0 = env->CP0_WatchHi[sel];
503 }
504
505 void do_mfc0_debug (void)
506 {
507 T0 = env->CP0_Debug;
508 if (env->hflags & MIPS_HFLAG_DM)
509 T0 |= 1 << CP0DB_DM;
510 }
511
512 void do_mftc0_debug(void)
513 {
514 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
515
516 /* XXX: Might be wrong, check with EJTAG spec. */
517 T0 = (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
518 (env->CP0_Debug_tcstatus[other_tc] &
519 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
520 }
521
522 #if defined(TARGET_MIPS64)
523 void do_dmfc0_tcrestart (void)
524 {
525 T0 = env->PC[env->current_tc];
526 }
527
528 void do_dmfc0_tchalt (void)
529 {
530 T0 = env->CP0_TCHalt[env->current_tc];
531 }
532
533 void do_dmfc0_tccontext (void)
534 {
535 T0 = env->CP0_TCContext[env->current_tc];
536 }
537
538 void do_dmfc0_tcschedule (void)
539 {
540 T0 = env->CP0_TCSchedule[env->current_tc];
541 }
542
543 void do_dmfc0_tcschefback (void)
544 {
545 T0 = env->CP0_TCScheFBack[env->current_tc];
546 }
547
548 void do_dmfc0_lladdr (void)
549 {
550 T0 = env->CP0_LLAddr >> 4;
551 }
552
553 void do_dmfc0_watchlo (uint32_t sel)
554 {
555 T0 = env->CP0_WatchLo[sel];
556 }
557 #endif /* TARGET_MIPS64 */
558
559 void do_mtc0_index (void)
560 {
561 int num = 1;
562 unsigned int tmp = env->tlb->nb_tlb;
563
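     /* Compute a power-of-two mask wide enough to cover all implemented TLB
        entries; the write below keeps only the probe-failure bit (31) of the
        old Index value and masks the new index with it. */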
564 do {
565 tmp >>= 1;
566 num <<= 1;
567 } while (tmp);
568 env->CP0_Index = (env->CP0_Index & 0x80000000) | (T0 & (num - 1));
569 }
570
571 void do_mtc0_mvpcontrol (void)
572 {
573 uint32_t mask = 0;
574 uint32_t newval;
575
576 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
577 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
578 (1 << CP0MVPCo_EVP);
579 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
580 mask |= (1 << CP0MVPCo_STLB);
581 newval = (env->mvp->CP0_MVPControl & ~mask) | (T0 & mask);
582
583 // TODO: Enable/disable shared TLB, enable/disable VPEs.
584
585 env->mvp->CP0_MVPControl = newval;
586 }
587
588 void do_mtc0_vpecontrol (void)
589 {
590 uint32_t mask;
591 uint32_t newval;
592
593 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
594 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
595 newval = (env->CP0_VPEControl & ~mask) | (T0 & mask);
596
597 /* Yield scheduler intercept not implemented. */
598 /* Gating storage scheduler intercept not implemented. */
599
600 // TODO: Enable/disable TCs.
601
602 env->CP0_VPEControl = newval;
603 }
604
605 void do_mtc0_vpeconf0 (void)
606 {
607 uint32_t mask = 0;
608 uint32_t newval;
609
610 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
611 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
612 mask |= (0xff << CP0VPEC0_XTC);
613 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
614 }
615 newval = (env->CP0_VPEConf0 & ~mask) | (T0 & mask);
616
617 // TODO: TC exclusive handling due to ERL/EXL.
618
619 env->CP0_VPEConf0 = newval;
620 }
621
622 void do_mtc0_vpeconf1 (void)
623 {
624 uint32_t mask = 0;
625 uint32_t newval;
626
627 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
628 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
629 (0xff << CP0VPEC1_NCP1);
630 newval = (env->CP0_VPEConf1 & ~mask) | (T0 & mask);
631
632 /* UDI not implemented. */
633 /* CP2 not implemented. */
634
635 // TODO: Handle FPU (CP1) binding.
636
637 env->CP0_VPEConf1 = newval;
638 }
639
640 void do_mtc0_yqmask (void)
641 {
642 /* Yield qualifier inputs not implemented. */
643 env->CP0_YQMask = 0x00000000;
644 }
645
646 void do_mtc0_vpeopt (void)
647 {
648 env->CP0_VPEOpt = T0 & 0x0000ffff;
649 }
650
651 void do_mtc0_entrylo0 (void)
652 {
653 /* Large physaddr (PABITS) not implemented */
654 /* 1k pages not implemented */
655 env->CP0_EntryLo0 = T0 & 0x3FFFFFFF;
656 }
657
658 void do_mtc0_tcstatus (void)
659 {
660 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
661 uint32_t newval;
662
663 newval = (env->CP0_TCStatus[env->current_tc] & ~mask) | (T0 & mask);
664
665 // TODO: Sync with CP0_Status.
666
667 env->CP0_TCStatus[env->current_tc] = newval;
668 }
669
670 void do_mttc0_tcstatus (void)
671 {
672 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
673
674 // TODO: Sync with CP0_Status.
675
676 env->CP0_TCStatus[other_tc] = T0;
677 }
678
679 void do_mtc0_tcbind (void)
680 {
681 uint32_t mask = (1 << CP0TCBd_TBE);
682 uint32_t newval;
683
684 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
685 mask |= (1 << CP0TCBd_CurVPE);
686 newval = (env->CP0_TCBind[env->current_tc] & ~mask) | (T0 & mask);
687 env->CP0_TCBind[env->current_tc] = newval;
688 }
689
690 void do_mttc0_tcbind (void)
691 {
692 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
693 uint32_t mask = (1 << CP0TCBd_TBE);
694 uint32_t newval;
695
696 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
697 mask |= (1 << CP0TCBd_CurVPE);
698 newval = (env->CP0_TCBind[other_tc] & ~mask) | (T0 & mask);
699 env->CP0_TCBind[other_tc] = newval;
700 }
701
702 void do_mtc0_tcrestart (void)
703 {
704 env->PC[env->current_tc] = T0;
705 env->CP0_TCStatus[env->current_tc] &= ~(1 << CP0TCSt_TDS);
706 env->CP0_LLAddr = 0ULL;
707 /* MIPS16 not implemented. */
708 }
709
710 void do_mttc0_tcrestart (void)
711 {
712 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
713
714 env->PC[other_tc] = T0;
715 env->CP0_TCStatus[other_tc] &= ~(1 << CP0TCSt_TDS);
716 env->CP0_LLAddr = 0ULL;
717 /* MIPS16 not implemented. */
718 }
719
720 void do_mtc0_tchalt (void)
721 {
722 env->CP0_TCHalt[env->current_tc] = T0 & 0x1;
723
724 // TODO: Halt TC / Restart (if allocated+active) TC.
725 }
726
727 void do_mttc0_tchalt (void)
728 {
729 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
730
731 // TODO: Halt TC / Restart (if allocated+active) TC.
732
733 env->CP0_TCHalt[other_tc] = T0;
734 }
735
736 void do_mtc0_tccontext (void)
737 {
738 env->CP0_TCContext[env->current_tc] = T0;
739 }
740
741 void do_mttc0_tccontext (void)
742 {
743 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
744
745 env->CP0_TCContext[other_tc] = T0;
746 }
747
748 void do_mtc0_tcschedule (void)
749 {
750 env->CP0_TCSchedule[env->current_tc] = T0;
751 }
752
753 void do_mttc0_tcschedule (void)
754 {
755 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
756
757 env->CP0_TCSchedule[other_tc] = T0;
758 }
759
760 void do_mtc0_tcschefback (void)
761 {
762 env->CP0_TCScheFBack[env->current_tc] = T0;
763 }
764
765 void do_mttc0_tcschefback (void)
766 {
767 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
768
769 env->CP0_TCScheFBack[other_tc] = T0;
770 }
771
772 void do_mtc0_entrylo1 (void)
773 {
774 /* Large physaddr (PABITS) not implemented */
775 /* 1k pages not implemented */
776 env->CP0_EntryLo1 = T0 & 0x3FFFFFFF;
777 }
778
779 void do_mtc0_context (void)
780 {
781 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (T0 & ~0x007FFFFF);
782 }
783
784 void do_mtc0_pagemask (void)
785 {
786 /* 1k pages not implemented */
787 env->CP0_PageMask = T0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
788 }
789
790 void do_mtc0_pagegrain (void)
791 {
792 /* SmartMIPS not implemented */
793 /* Large physaddr (PABITS) not implemented */
794 /* 1k pages not implemented */
795 env->CP0_PageGrain = 0;
796 }
797
798 void do_mtc0_wired (void)
799 {
800 env->CP0_Wired = T0 % env->tlb->nb_tlb;
801 }
802
803 void do_mtc0_srsconf0 (void)
804 {
805 env->CP0_SRSConf0 |= T0 & env->CP0_SRSConf0_rw_bitmask;
806 }
807
808 void do_mtc0_srsconf1 (void)
809 {
810 env->CP0_SRSConf1 |= T0 & env->CP0_SRSConf1_rw_bitmask;
811 }
812
813 void do_mtc0_srsconf2 (void)
814 {
815 env->CP0_SRSConf2 |= T0 & env->CP0_SRSConf2_rw_bitmask;
816 }
817
818 void do_mtc0_srsconf3 (void)
819 {
820 env->CP0_SRSConf3 |= T0 & env->CP0_SRSConf3_rw_bitmask;
821 }
822
823 void do_mtc0_srsconf4 (void)
824 {
825 env->CP0_SRSConf4 |= T0 & env->CP0_SRSConf4_rw_bitmask;
826 }
827
828 void do_mtc0_hwrena (void)
829 {
830 env->CP0_HWREna = T0 & 0x0000000F;
831 }
832
833 void do_mtc0_count (void)
834 {
835 cpu_mips_store_count(env, T0);
836 }
837
838 void do_mtc0_entryhi (void)
839 {
840 target_ulong old, val;
841
842 /* 1k pages not implemented */
843 val = T0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
844 #if defined(TARGET_MIPS64)
845 val &= env->SEGMask;
846 #endif
847 old = env->CP0_EntryHi;
848 env->CP0_EntryHi = val;
849 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
850 uint32_t tcst = env->CP0_TCStatus[env->current_tc] & ~0xff;
851 env->CP0_TCStatus[env->current_tc] = tcst | (val & 0xff);
852 }
853 /* If the ASID changes, flush qemu's TLB. */
854 if ((old & 0xFF) != (val & 0xFF))
855 cpu_mips_tlb_flush(env, 1);
856 }
857
858 void do_mttc0_entryhi(void)
859 {
860 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
861
862 env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (T0 & ~0xff);
863 env->CP0_TCStatus[other_tc] = (env->CP0_TCStatus[other_tc] & ~0xff) | (T0 & 0xff);
864 }
865
866 void do_mtc0_compare (void)
867 {
868 cpu_mips_store_compare(env, T0);
869 }
870
871 void do_mtc0_status (void)
872 {
873 uint32_t val, old;
874 uint32_t mask = env->CP0_Status_rw_bitmask;
875
876 val = T0 & mask;
877 old = env->CP0_Status;
878 env->CP0_Status = (env->CP0_Status & ~mask) | val;
879 compute_hflags(env);
880 if (loglevel & CPU_LOG_EXEC)
881 do_mtc0_status_debug(old, val);
882 cpu_mips_update_irq(env);
883 }
884
885 void do_mttc0_status(void)
886 {
887 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
888 uint32_t tcstatus = env->CP0_TCStatus[other_tc];
889
890 env->CP0_Status = T0 & ~0xf1000018;
891 tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (T0 & (0xf << CP0St_CU0));
892 tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((T0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
893 tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((T0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
894 env->CP0_TCStatus[other_tc] = tcstatus;
895 }
896
897 void do_mtc0_intctl (void)
898 {
899 /* vectored interrupts not implemented, no performance counters. */
900 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (T0 & 0x000002e0);
901 }
902
903 void do_mtc0_srsctl (void)
904 {
905 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
906 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (T0 & mask);
907 }
908
909 void do_mtc0_cause (void)
910 {
911 uint32_t mask = 0x00C00300;
912 uint32_t old = env->CP0_Cause;
913
914 if (env->insn_flags & ISA_MIPS32R2)
915 mask |= 1 << CP0Ca_DC;
916
917 env->CP0_Cause = (env->CP0_Cause & ~mask) | (T0 & mask);
918
919 if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
920 if (env->CP0_Cause & (1 << CP0Ca_DC))
921 cpu_mips_stop_count(env);
922 else
923 cpu_mips_start_count(env);
924 }
925
926     /* Handle the software interrupt as a hardware one, as they
927        are very similar. */
928 if (T0 & CP0Ca_IP_mask) {
929 cpu_mips_update_irq(env);
930 }
931 }
932
933 void do_mtc0_ebase (void)
934 {
935 /* vectored interrupts not implemented */
936 /* Multi-CPU not implemented */
937 env->CP0_EBase = 0x80000000 | (T0 & 0x3FFFF000);
938 }
939
940 void do_mtc0_config0 (void)
941 {
942 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (T0 & 0x00000007);
943 }
944
945 void do_mtc0_config2 (void)
946 {
947 /* tertiary/secondary caches not implemented */
948 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
949 }
950
951 void do_mtc0_watchlo (uint32_t sel)
952 {
953     /* Watch exceptions for instructions, data loads and data stores
954        are not implemented. */
955 env->CP0_WatchLo[sel] = (T0 & ~0x7);
956 }
957
958 void do_mtc0_watchhi (uint32_t sel)
959 {
960 env->CP0_WatchHi[sel] = (T0 & 0x40FF0FF8);
961 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & T0 & 0x7);
962 }
963
964 void do_mtc0_xcontext (void)
965 {
966 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
967 env->CP0_XContext = (env->CP0_XContext & mask) | (T0 & ~mask);
968 }
969
970 void do_mtc0_framemask (void)
971 {
972 env->CP0_Framemask = T0; /* XXX */
973 }
974
975 void do_mtc0_debug (void)
976 {
977 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (T0 & 0x13300120);
978 if (T0 & (1 << CP0DB_DM))
979 env->hflags |= MIPS_HFLAG_DM;
980 else
981 env->hflags &= ~MIPS_HFLAG_DM;
982 }
983
984 void do_mttc0_debug(void)
985 {
986 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
987
988 /* XXX: Might be wrong, check with EJTAG spec. */
989 env->CP0_Debug_tcstatus[other_tc] = T0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
990 env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
991 (T0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
992 }
993
994 void do_mtc0_performance0 (void)
995 {
996 env->CP0_Performance0 = T0 & 0x000007ff;
997 }
998
999 void do_mtc0_taglo (void)
1000 {
1001 env->CP0_TagLo = T0 & 0xFFFFFCF6;
1002 }
1003
1004 void do_mtc0_datalo (void)
1005 {
1006 env->CP0_DataLo = T0; /* XXX */
1007 }
1008
1009 void do_mtc0_taghi (void)
1010 {
1011 env->CP0_TagHi = T0; /* XXX */
1012 }
1013
1014 void do_mtc0_datahi (void)
1015 {
1016 env->CP0_DataHi = T0; /* XXX */
1017 }
1018
1019 void do_mtc0_status_debug(uint32_t old, uint32_t val)
1020 {
1021 fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
1022 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1023 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1024 env->CP0_Cause);
1025 switch (env->hflags & MIPS_HFLAG_KSU) {
1026 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1027 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1028 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1029 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1030 }
1031 }
1032
1033 void do_mtc0_status_irqraise_debug(void)
1034 {
1035 fprintf(logfile, "Raise pending IRQs\n");
1036 }
1037 #endif /* !CONFIG_USER_ONLY */
1038
1039 /* MIPS MT functions */
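     /* The mftXX helpers read, and the mttXX helpers write, registers of the
        thread context selected by the TargTC field of CP0_VPEControl. */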
1040 void do_mftgpr(uint32_t sel)
1041 {
1042 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1043
1044 T0 = env->gpr[other_tc][sel];
1045 }
1046
1047 void do_mftlo(uint32_t sel)
1048 {
1049 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1050
1051 T0 = env->LO[other_tc][sel];
1052 }
1053
1054 void do_mfthi(uint32_t sel)
1055 {
1056 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1057
1058 T0 = env->HI[other_tc][sel];
1059 }
1060
1061 void do_mftacx(uint32_t sel)
1062 {
1063 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1064
1065 T0 = env->ACX[other_tc][sel];
1066 }
1067
1068 void do_mftdsp(void)
1069 {
1070 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1071
1072 T0 = env->DSPControl[other_tc];
1073 }
1074
1075 void do_mttgpr(uint32_t sel)
1076 {
1077 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1078
1079     env->gpr[other_tc][sel] = T0;
1080 }
1081
1082 void do_mttlo(uint32_t sel)
1083 {
1084 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1085
1086     env->LO[other_tc][sel] = T0;
1087 }
1088
1089 void do_mtthi(uint32_t sel)
1090 {
1091 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1092
1093     env->HI[other_tc][sel] = T0;
1094 }
1095
1096 void do_mttacx(uint32_t sel)
1097 {
1098 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1099
1100     env->ACX[other_tc][sel] = T0;
1101 }
1102
1103 void do_mttdsp(void)
1104 {
1105 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1106
1107     env->DSPControl[other_tc] = T0;
1108 }
1109
1110 /* MIPS MT functions */
1111 void do_dmt(void)
1112 {
1113 // TODO
1114 T0 = 0;
1115 // rt = T0
1116 }
1117
1118 void do_emt(void)
1119 {
1120 // TODO
1121 T0 = 0;
1122 // rt = T0
1123 }
1124
1125 void do_dvpe(void)
1126 {
1127 // TODO
1128 T0 = 0;
1129 // rt = T0
1130 }
1131
1132 void do_evpe(void)
1133 {
1134 // TODO
1135 T0 = 0;
1136 // rt = T0
1137 }
1138
1139 void do_fork(void)
1140 {
1141 // T0 = rt, T1 = rs
1142 T0 = 0;
1143 // TODO: store to TC register
1144 }
1145
1146 void do_yield(void)
1147 {
1148     if ((target_long)T0 < 0) {   /* T0 is unsigned; compare the signed value */
1149 /* No scheduling policy implemented. */
1150 if (T0 != -2) {
1151 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1152 env->CP0_TCStatus[env->current_tc] & (1 << CP0TCSt_DT)) {
1153 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1154 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1155 do_raise_exception(EXCP_THREAD);
1156 }
1157 }
1158 } else if (T0 == 0) {
1159 if (0 /* TODO: TC underflow */) {
1160 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1161 do_raise_exception(EXCP_THREAD);
1162 } else {
1163 // TODO: Deallocate TC
1164 }
1165 } else if (T0 > 0) {
1166 /* Yield qualifier inputs not implemented. */
1167 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1168 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1169 do_raise_exception(EXCP_THREAD);
1170 }
1171 T0 = env->CP0_YQMask;
1172 }
1173
1174 /* CP1 functions */
1175 void fpu_handle_exception(void)
1176 {
1177 #ifdef CONFIG_SOFTFLOAT
1178 int flags = get_float_exception_flags(&env->fpu->fp_status);
1179 unsigned int cpuflags = 0, enable, cause = 0;
1180
1181 enable = GET_FP_ENABLE(env->fpu->fcr31);
1182
1183 /* determine current flags */
1184 if (flags & float_flag_invalid) {
1185 cpuflags |= FP_INVALID;
1186 cause |= FP_INVALID & enable;
1187 }
1188 if (flags & float_flag_divbyzero) {
1189 cpuflags |= FP_DIV0;
1190 cause |= FP_DIV0 & enable;
1191 }
1192 if (flags & float_flag_overflow) {
1193 cpuflags |= FP_OVERFLOW;
1194 cause |= FP_OVERFLOW & enable;
1195 }
1196 if (flags & float_flag_underflow) {
1197 cpuflags |= FP_UNDERFLOW;
1198 cause |= FP_UNDERFLOW & enable;
1199 }
1200 if (flags & float_flag_inexact) {
1201 cpuflags |= FP_INEXACT;
1202 cause |= FP_INEXACT & enable;
1203 }
1204 SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
1205 SET_FP_CAUSE(env->fpu->fcr31, cause);
1206 #else
1207 SET_FP_FLAGS(env->fpu->fcr31, 0);
1208 SET_FP_CAUSE(env->fpu->fcr31, 0);
1209 #endif
1210 }
1211
1212 #ifndef CONFIG_USER_ONLY
1213 /* TLB management */
1214 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1215 {
1216 /* Flush qemu's TLB and discard all shadowed entries. */
1217 tlb_flush (env, flush_global);
1218 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1219 }
1220
1221 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1222 {
1223 /* Discard entries from env->tlb[first] onwards. */
1224 while (env->tlb->tlb_in_use > first) {
1225 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1226 }
1227 }
1228
1229 static void r4k_fill_tlb (int idx)
1230 {
1231 r4k_tlb_t *tlb;
1232
1233 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1234 tlb = &env->tlb->mmu.r4k.tlb[idx];
1235 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1236 #if defined(TARGET_MIPS64)
1237 tlb->VPN &= env->SEGMask;
1238 #endif
1239 tlb->ASID = env->CP0_EntryHi & 0xFF;
1240 tlb->PageMask = env->CP0_PageMask;
1241 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1242 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1243 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1244 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
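     /* The PFN fields of EntryLo start at bit 6; shifting left by 12
        converts them to byte addresses (4 KB page granule assumed here). */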
1245 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1246 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1247 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1248 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1249 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1250 }
1251
1252 void r4k_do_tlbwi (void)
1253 {
1254 /* Discard cached TLB entries. We could avoid doing this if the
1255 tlbwi is just upgrading access permissions on the current entry;
1256 that might be a further win. */
1257 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1258
1259 r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
1260 r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
1261 }
1262
1263 void r4k_do_tlbwr (void)
1264 {
1265 int r = cpu_mips_get_random(env);
1266
1267 r4k_invalidate_tlb(env, r, 1);
1268 r4k_fill_tlb(r);
1269 }
1270
1271 void r4k_do_tlbp (void)
1272 {
1273 r4k_tlb_t *tlb;
1274 target_ulong mask;
1275 target_ulong tag;
1276 target_ulong VPN;
1277 uint8_t ASID;
1278 int i;
1279
1280 ASID = env->CP0_EntryHi & 0xFF;
1281 for (i = 0; i < env->tlb->nb_tlb; i++) {
1282 tlb = &env->tlb->mmu.r4k.tlb[i];
1283 /* 1k pages are not supported. */
1284 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1285 tag = env->CP0_EntryHi & ~mask;
1286 VPN = tlb->VPN & ~mask;
1287 /* Check ASID, virtual page number & size */
1288 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1289 /* TLB match */
1290 env->CP0_Index = i;
1291 break;
1292 }
1293 }
1294 if (i == env->tlb->nb_tlb) {
1295         /* No match. Discard shadow entries if any of them match. */
1296 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1297 tlb = &env->tlb->mmu.r4k.tlb[i];
1298 /* 1k pages are not supported. */
1299 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1300 tag = env->CP0_EntryHi & ~mask;
1301 VPN = tlb->VPN & ~mask;
1302 /* Check ASID, virtual page number & size */
1303 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1304 r4k_mips_tlb_flush_extra (env, i);
1305 break;
1306 }
1307 }
1308
1309 env->CP0_Index |= 0x80000000;
1310 }
1311 }
1312
1313 void r4k_do_tlbr (void)
1314 {
1315 r4k_tlb_t *tlb;
1316 uint8_t ASID;
1317
1318 ASID = env->CP0_EntryHi & 0xFF;
1319 tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1320
1321 /* If this will change the current ASID, flush qemu's TLB. */
1322 if (ASID != tlb->ASID)
1323 cpu_mips_tlb_flush (env, 1);
1324
1325 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1326
1327 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1328 env->CP0_PageMask = tlb->PageMask;
1329 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1330 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1331 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1332 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1333 }
1334
1335 #endif /* !CONFIG_USER_ONLY */
1336
1337 void dump_ldst (const unsigned char *func)
1338 {
1339 if (loglevel)
1340         fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", func, T0, T1);
1341 }
1342
1343 void dump_sc (void)
1344 {
1345 if (loglevel) {
1346 fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
1347 T1, T0, env->CP0_LLAddr);
1348 }
1349 }
1350
1351 /* Specials */
1352 void do_di (void)
1353 {
1354 T0 = env->CP0_Status;
1355 env->CP0_Status = T0 & ~(1 << CP0St_IE);
1356 cpu_mips_update_irq(env);
1357 }
1358
1359 void do_ei (void)
1360 {
1361 T0 = env->CP0_Status;
1362 env->CP0_Status = T0 | (1 << CP0St_IE);
1363 cpu_mips_update_irq(env);
1364 }
1365
1366 void debug_pre_eret (void)
1367 {
1368 fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1369 env->PC[env->current_tc], env->CP0_EPC);
1370 if (env->CP0_Status & (1 << CP0St_ERL))
1371 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1372 if (env->hflags & MIPS_HFLAG_DM)
1373 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1374 fputs("\n", logfile);
1375 }
1376
1377 void debug_post_eret (void)
1378 {
1379 fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1380 env->PC[env->current_tc], env->CP0_EPC);
1381 if (env->CP0_Status & (1 << CP0St_ERL))
1382 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1383 if (env->hflags & MIPS_HFLAG_DM)
1384 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1385 switch (env->hflags & MIPS_HFLAG_KSU) {
1386 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1387 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1388 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1389 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1390 }
1391 }
1392
1393 void do_eret (void)
1394 {
1395 if (loglevel & CPU_LOG_EXEC)
1396 debug_pre_eret();
1397 if (env->CP0_Status & (1 << CP0St_ERL)) {
1398 env->PC[env->current_tc] = env->CP0_ErrorEPC;
1399 env->CP0_Status &= ~(1 << CP0St_ERL);
1400 } else {
1401 env->PC[env->current_tc] = env->CP0_EPC;
1402 env->CP0_Status &= ~(1 << CP0St_EXL);
1403 }
1404 compute_hflags(env);
1405 if (loglevel & CPU_LOG_EXEC)
1406 debug_post_eret();
1407 env->CP0_LLAddr = 1;
1408 }
1409
1410 void do_deret (void)
1411 {
1412 if (loglevel & CPU_LOG_EXEC)
1413 debug_pre_eret();
1414 env->PC[env->current_tc] = env->CP0_DEPC;
1415     env->hflags &= ~MIPS_HFLAG_DM;   /* leave debug mode */
1416 compute_hflags(env);
1417 if (loglevel & CPU_LOG_EXEC)
1418 debug_post_eret();
1419 env->CP0_LLAddr = 1;
1420 }
1421
1422 void do_rdhwr_cpunum(void)
1423 {
1424 if ((env->hflags & MIPS_HFLAG_CP0) ||
1425 (env->CP0_HWREna & (1 << 0)))
1426 T0 = env->CP0_EBase & 0x3ff;
1427 else
1428 do_raise_exception(EXCP_RI);
1429 }
1430
1431 void do_rdhwr_synci_step(void)
1432 {
1433 if ((env->hflags & MIPS_HFLAG_CP0) ||
1434 (env->CP0_HWREna & (1 << 1)))
1435 T0 = env->SYNCI_Step;
1436 else
1437 do_raise_exception(EXCP_RI);
1438 }
1439
1440 void do_rdhwr_cc(void)
1441 {
1442 if ((env->hflags & MIPS_HFLAG_CP0) ||
1443 (env->CP0_HWREna & (1 << 2)))
1444 T0 = env->CP0_Count;
1445 else
1446 do_raise_exception(EXCP_RI);
1447 }
1448
1449 void do_rdhwr_ccres(void)
1450 {
1451 if ((env->hflags & MIPS_HFLAG_CP0) ||
1452 (env->CP0_HWREna & (1 << 3)))
1453 T0 = env->CCRes;
1454 else
1455 do_raise_exception(EXCP_RI);
1456 }
1457
1458 /* Bitfield operations. */
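     /* EXT copies a size-bit field of T1, starting at bit pos, into T0;
        INS merges the low size bits of T1 into T0 at bit pos;
        WSBH swaps the bytes within each halfword of T1. */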
1459 void do_ext(uint32_t pos, uint32_t size)
1460 {
1461 T0 = (int32_t)((T1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0));
1462 }
1463
1464 void do_ins(uint32_t pos, uint32_t size)
1465 {
1466 target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos;
1467
1468 T0 = (int32_t)((T0 & ~mask) | ((T1 << pos) & mask));
1469 }
1470
1471 void do_wsbh(void)
1472 {
1473 T0 = (int32_t)(((T1 << 8) & ~0x00FF00FF) | ((T1 >> 8) & 0x00FF00FF));
1474 }
1475
1476 #if defined(TARGET_MIPS64)
1477 void do_dext(uint32_t pos, uint32_t size)
1478 {
1479 T0 = (T1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
1480 }
1481
1482 void do_dins(uint32_t pos, uint32_t size)
1483 {
1484 target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;
1485
1486 T0 = (T0 & ~mask) | ((T1 << pos) & mask);
1487 }
1488
1489 void do_dsbh(void)
1490 {
1491 T0 = ((T1 << 8) & ~0x00FF00FF00FF00FFULL) | ((T1 >> 8) & 0x00FF00FF00FF00FFULL);
1492 }
1493
1494 void do_dshd(void)
1495 {
1496 T1 = ((T1 << 16) & ~0x0000FFFF0000FFFFULL) | ((T1 >> 16) & 0x0000FFFF0000FFFFULL);
1497 T0 = (T1 << 32) | (T1 >> 32);
1498 }
1499 #endif
1500
1501 void do_pmon (int function)
1502 {
1503 function /= 2;
1504 switch (function) {
1505 case 2: /* TODO: char inbyte(int waitflag); */
1506 if (env->gpr[env->current_tc][4] == 0)
1507 env->gpr[env->current_tc][2] = -1;
1508 /* Fall through */
1509 case 11: /* TODO: char inbyte (void); */
1510 env->gpr[env->current_tc][2] = -1;
1511 break;
1512 case 3:
1513 case 12:
1514 printf("%c", (char)(env->gpr[env->current_tc][4] & 0xFF));
1515 break;
1516 case 17:
1517 break;
1518 case 158:
1519 {
1520 unsigned char *fmt = (void *)(unsigned long)env->gpr[env->current_tc][4];
1521 printf("%s", fmt);
1522 }
1523 break;
1524 }
1525 }
1526
1527 void do_wait (void)
1528 {
1529 env->halted = 1;
1530 do_raise_exception(EXCP_HLT);
1531 }
1532
1533 #if !defined(CONFIG_USER_ONLY)
1534
1535 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1536
1537 #define MMUSUFFIX _mmu
1538 #define ALIGNED_ONLY
1539
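     /* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte
        accesses (SHIFT selects an access size of 1 << SHIFT bytes). */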
1540 #define SHIFT 0
1541 #include "softmmu_template.h"
1542
1543 #define SHIFT 1
1544 #include "softmmu_template.h"
1545
1546 #define SHIFT 2
1547 #include "softmmu_template.h"
1548
1549 #define SHIFT 3
1550 #include "softmmu_template.h"
1551
1552 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1553 {
1554 env->CP0_BadVAddr = addr;
1555 do_restore_state (retaddr);
1556 do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1557 }
1558
1559 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1560 {
1561 TranslationBlock *tb;
1562 CPUState *saved_env;
1563 unsigned long pc;
1564 int ret;
1565
1566 /* XXX: hack to restore env in all cases, even if not called from
1567 generated code */
1568 saved_env = env;
1569 env = cpu_single_env;
1570 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1571 if (ret) {
1572 if (retaddr) {
1573 /* now we have a real cpu fault */
1574 pc = (unsigned long)retaddr;
1575 tb = tb_find_pc(pc);
1576 if (tb) {
1577                 /* The PC is inside the translated code, which means we
1578                    have a virtual CPU fault. */
1579 cpu_restore_state(tb, env, pc, NULL);
1580 }
1581 }
1582 do_raise_exception_err(env->exception_index, env->error_code);
1583 }
1584 env = saved_env;
1585 }
1586
1587 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
1588 int unused)
1589 {
1590 if (is_exec)
1591 do_raise_exception(EXCP_IBE);
1592 else
1593 do_raise_exception(EXCP_DBE);
1594 }
1595 #endif /* !CONFIG_USER_ONLY */
1596
1597 /* Complex FPU operations which may need stack space. */
1598
1599 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
1600 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
1601 #define FLOAT_TWO32 make_float32(1 << 30)
1602 #define FLOAT_TWO64 make_float64(1ULL << 62)
1603 #define FLOAT_QNAN32 0x7fbfffff
1604 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
1605 #define FLOAT_SNAN32 0x7fffffff
1606 #define FLOAT_SNAN64 0x7fffffffffffffffULL
1607
1608 /* convert MIPS rounding mode in FCR31 to IEEE library */
1609 unsigned int ieee_rm[] = {
1610 float_round_nearest_even,
1611 float_round_to_zero,
1612 float_round_up,
1613 float_round_down
1614 };
1615
1616 #define RESTORE_ROUNDING_MODE \
1617 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
1618
1619 void do_cfc1 (uint32_t reg)
1620 {
1621 switch (reg) {
1622 case 0:
1623 T0 = (int32_t)env->fpu->fcr0;
1624 break;
1625 case 25:
1626 T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
1627 break;
1628 case 26:
1629 T0 = env->fpu->fcr31 & 0x0003f07c;
1630 break;
1631 case 28:
1632 T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
1633 break;
1634 default:
1635 T0 = (int32_t)env->fpu->fcr31;
1636 break;
1637 }
1638 }
1639
1640 void do_ctc1 (uint32_t reg)
1641 {
1642 switch(reg) {
1643 case 25:
1644 if (T0 & 0xffffff00)
1645 return;
1646 env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
1647 ((T0 & 0x1) << 23);
1648 break;
1649 case 26:
1650 if (T0 & 0x007c0000)
1651 return;
1652 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
1653 break;
1654 case 28:
1655 if (T0 & 0x007c0000)
1656 return;
1657 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
1658 ((T0 & 0x4) << 22);
1659 break;
1660 case 31:
1661 if (T0 & 0x007c0000)
1662 return;
1663 env->fpu->fcr31 = T0;
1664 break;
1665 default:
1666 return;
1667 }
1668 /* set rounding mode */
1669 RESTORE_ROUNDING_MODE;
1670 set_float_exception_flags(0, &env->fpu->fp_status);
1671 if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
1672 do_raise_exception(EXCP_FPE);
1673 }
1674
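     /* Translate between the softfloat exception flag bits and the MIPS FCSR
        flag layout (Inexact, Underflow, Overflow, Divide by zero, Invalid). */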
1675 static always_inline char ieee_ex_to_mips(char xcpt)
1676 {
1677 return (xcpt & float_flag_inexact) >> 5 |
1678 (xcpt & float_flag_underflow) >> 3 |
1679 (xcpt & float_flag_overflow) >> 1 |
1680 (xcpt & float_flag_divbyzero) << 1 |
1681 (xcpt & float_flag_invalid) << 4;
1682 }
1683
1684 static always_inline char mips_ex_to_ieee(char xcpt)
1685 {
1686 return (xcpt & FP_INEXACT) << 5 |
1687 (xcpt & FP_UNDERFLOW) << 3 |
1688 (xcpt & FP_OVERFLOW) << 1 |
1689 (xcpt & FP_DIV0) >> 1 |
1690 (xcpt & FP_INVALID) >> 4;
1691 }
1692
1693 static always_inline void update_fcr31(void)
1694 {
1695 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));
1696
1697 SET_FP_CAUSE(env->fpu->fcr31, tmp);
1698 if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
1699 do_raise_exception(EXCP_FPE);
1700 else
1701 UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
1702 }
1703
1704 #define FLOAT_OP(name, p) void do_float_##name##_##p(void)
1705
1706 FLOAT_OP(cvtd, s)
1707 {
1708 set_float_exception_flags(0, &env->fpu->fp_status);
1709 FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
1710 update_fcr31();
1711 }
1712 FLOAT_OP(cvtd, w)
1713 {
1714 set_float_exception_flags(0, &env->fpu->fp_status);
1715 FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
1716 update_fcr31();
1717 }
1718 FLOAT_OP(cvtd, l)
1719 {
1720 set_float_exception_flags(0, &env->fpu->fp_status);
1721 FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
1722 update_fcr31();
1723 }
1724 FLOAT_OP(cvtl, d)
1725 {
1726 set_float_exception_flags(0, &env->fpu->fp_status);
1727 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1728 update_fcr31();
1729 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1730 DT2 = FLOAT_SNAN64;
1731 }
1732 FLOAT_OP(cvtl, s)
1733 {
1734 set_float_exception_flags(0, &env->fpu->fp_status);
1735 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1736 update_fcr31();
1737 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1738 DT2 = FLOAT_SNAN64;
1739 }
1740
1741 FLOAT_OP(cvtps, pw)
1742 {
1743 set_float_exception_flags(0, &env->fpu->fp_status);
1744 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
1745 FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
1746 update_fcr31();
1747 }
1748 FLOAT_OP(cvtpw, ps)
1749 {
1750 set_float_exception_flags(0, &env->fpu->fp_status);
1751 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1752 WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
1753 update_fcr31();
1754 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1755 WT2 = FLOAT_SNAN32;
1756 }
1757 FLOAT_OP(cvts, d)
1758 {
1759 set_float_exception_flags(0, &env->fpu->fp_status);
1760 FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
1761 update_fcr31();
1762 }
1763 FLOAT_OP(cvts, w)
1764 {
1765 set_float_exception_flags(0, &env->fpu->fp_status);
1766 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
1767 update_fcr31();
1768 }
1769 FLOAT_OP(cvts, l)
1770 {
1771 set_float_exception_flags(0, &env->fpu->fp_status);
1772 FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
1773 update_fcr31();
1774 }
1775 FLOAT_OP(cvts, pl)
1776 {
1777 set_float_exception_flags(0, &env->fpu->fp_status);
1778 WT2 = WT0;
1779 update_fcr31();
1780 }
1781 FLOAT_OP(cvts, pu)
1782 {
1783 set_float_exception_flags(0, &env->fpu->fp_status);
1784 WT2 = WTH0;
1785 update_fcr31();
1786 }
1787 FLOAT_OP(cvtw, s)
1788 {
1789 set_float_exception_flags(0, &env->fpu->fp_status);
1790 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1791 update_fcr31();
1792 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1793 WT2 = FLOAT_SNAN32;
1794 }
1795 FLOAT_OP(cvtw, d)
1796 {
1797 set_float_exception_flags(0, &env->fpu->fp_status);
1798 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1799 update_fcr31();
1800 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1801 WT2 = FLOAT_SNAN32;
1802 }
1803
1804 FLOAT_OP(roundl, d)
1805 {
1806 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1807 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1808 RESTORE_ROUNDING_MODE;
1809 update_fcr31();
1810 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1811 DT2 = FLOAT_SNAN64;
1812 }
1813 FLOAT_OP(roundl, s)
1814 {
1815 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1816 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1817 RESTORE_ROUNDING_MODE;
1818 update_fcr31();
1819 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1820 DT2 = FLOAT_SNAN64;
1821 }
1822 FLOAT_OP(roundw, d)
1823 {
1824 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1825 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1826 RESTORE_ROUNDING_MODE;
1827 update_fcr31();
1828 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1829 WT2 = FLOAT_SNAN32;
1830 }
1831 FLOAT_OP(roundw, s)
1832 {
1833 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1834 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1835 RESTORE_ROUNDING_MODE;
1836 update_fcr31();
1837 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1838 WT2 = FLOAT_SNAN32;
1839 }
1840
1841 FLOAT_OP(truncl, d)
1842 {
1843 DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
1844 update_fcr31();
1845 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1846 DT2 = FLOAT_SNAN64;
1847 }
1848 FLOAT_OP(truncl, s)
1849 {
1850 DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
1851 update_fcr31();
1852 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1853 DT2 = FLOAT_SNAN64;
1854 }
1855 FLOAT_OP(truncw, d)
1856 {
1857 WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
1858 update_fcr31();
1859 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1860 WT2 = FLOAT_SNAN32;
1861 }
1862 FLOAT_OP(truncw, s)
1863 {
1864 WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
1865 update_fcr31();
1866 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1867 WT2 = FLOAT_SNAN32;
1868 }
1869
1870 FLOAT_OP(ceill, d)
1871 {
1872 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1873 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1874 RESTORE_ROUNDING_MODE;
1875 update_fcr31();
1876 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1877 DT2 = FLOAT_SNAN64;
1878 }
1879 FLOAT_OP(ceill, s)
1880 {
1881 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1882 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1883 RESTORE_ROUNDING_MODE;
1884 update_fcr31();
1885 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1886 DT2 = FLOAT_SNAN64;
1887 }
1888 FLOAT_OP(ceilw, d)
1889 {
1890 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1891 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1892 RESTORE_ROUNDING_MODE;
1893 update_fcr31();
1894 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1895 WT2 = FLOAT_SNAN32;
1896 }
1897 FLOAT_OP(ceilw, s)
1898 {
1899 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1900 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1901 RESTORE_ROUNDING_MODE;
1902 update_fcr31();
1903 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1904 WT2 = FLOAT_SNAN32;
1905 }
1906
1907 FLOAT_OP(floorl, d)
1908 {
1909 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1910 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1911 RESTORE_ROUNDING_MODE;
1912 update_fcr31();
1913 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1914 DT2 = FLOAT_SNAN64;
1915 }
1916 FLOAT_OP(floorl, s)
1917 {
1918 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1919 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1920 RESTORE_ROUNDING_MODE;
1921 update_fcr31();
1922 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1923 DT2 = FLOAT_SNAN64;
1924 }
1925 FLOAT_OP(floorw, d)
1926 {
1927 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1928 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1929 RESTORE_ROUNDING_MODE;
1930 update_fcr31();
1931 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1932 WT2 = FLOAT_SNAN32;
1933 }
1934 FLOAT_OP(floorw, s)
1935 {
1936 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1937 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1938 RESTORE_ROUNDING_MODE;
1939 update_fcr31();
1940 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1941 WT2 = FLOAT_SNAN32;
1942 }
1943
1944 /* MIPS specific unary operations */
1945 FLOAT_OP(recip, d)
1946 {
1947 set_float_exception_flags(0, &env->fpu->fp_status);
1948 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1949 update_fcr31();
1950 }
1951 FLOAT_OP(recip, s)
1952 {
1953 set_float_exception_flags(0, &env->fpu->fp_status);
1954 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1955 update_fcr31();
1956 }
1957
1958 FLOAT_OP(rsqrt, d)
1959 {
1960 set_float_exception_flags(0, &env->fpu->fp_status);
1961 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1962 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1963 update_fcr31();
1964 }
1965 FLOAT_OP(rsqrt, s)
1966 {
1967 set_float_exception_flags(0, &env->fpu->fp_status);
1968 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
1969 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
1970 update_fcr31();
1971 }
1972
1973 FLOAT_OP(recip1, d)
1974 {
1975 set_float_exception_flags(0, &env->fpu->fp_status);
1976 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1977 update_fcr31();
1978 }
1979 FLOAT_OP(recip1, s)
1980 {
1981 set_float_exception_flags(0, &env->fpu->fp_status);
1982 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1983 update_fcr31();
1984 }
1985 FLOAT_OP(recip1, ps)
1986 {
1987 set_float_exception_flags(0, &env->fpu->fp_status);
1988 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
1989 FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
1990 update_fcr31();
1991 }
1992
1993 FLOAT_OP(rsqrt1, d)
1994 {
1995 set_float_exception_flags(0, &env->fpu->fp_status);
1996 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
1997 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
1998 update_fcr31();
1999 }
2000 FLOAT_OP(rsqrt1, s)
2001 {
2002 set_float_exception_flags(0, &env->fpu->fp_status);
2003 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
2004 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
2005 update_fcr31();
2006 }
2007 FLOAT_OP(rsqrt1, ps)
2008 {
2009 set_float_exception_flags(0, &env->fpu->fp_status);
2010 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
2011 FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
2012 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
2013 FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
2014 update_fcr31();
2015 }
2016
2017 /* binary operations */
2018 #define FLOAT_BINOP(name) \
2019 FLOAT_OP(name, d) \
2020 { \
2021 set_float_exception_flags(0, &env->fpu->fp_status); \
2022 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
2023 update_fcr31(); \
2024 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
2025 DT2 = FLOAT_QNAN64; \
2026 } \
2027 FLOAT_OP(name, s) \
2028 { \
2029 set_float_exception_flags(0, &env->fpu->fp_status); \
2030 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
2031 update_fcr31(); \
2032 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
2033 WT2 = FLOAT_QNAN32; \
2034 } \
2035 FLOAT_OP(name, ps) \
2036 { \
2037 set_float_exception_flags(0, &env->fpu->fp_status); \
2038 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
2039 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
2040 update_fcr31(); \
2041 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
2042 WT2 = FLOAT_QNAN32; \
2043 WTH2 = FLOAT_QNAN32; \
2044 } \
2045 }
2046 FLOAT_BINOP(add)
2047 FLOAT_BINOP(sub)
2048 FLOAT_BINOP(mul)
2049 FLOAT_BINOP(div)
2050 #undef FLOAT_BINOP
2051
2052 /* MIPS specific binary operations */
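     /* RECIP2 computes -(a * b - 1) and RSQRT2 computes -(a * b - 1) / 2;
        these are presumably the correction terms used to refine the RECIP1
        and RSQRT1 seed values. The ps variants apply this to both halves. */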
2053 FLOAT_OP(recip2, d)
2054 {
2055 set_float_exception_flags(0, &env->fpu->fp_status);
2056 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
2057 FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
2058 update_fcr31();
2059 }
2060 FLOAT_OP(recip2, s)
2061 {
2062 set_float_exception_flags(0, &env->fpu->fp_status);
2063 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2064 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
2065 update_fcr31();
2066 }
2067 FLOAT_OP(recip2, ps)
2068 {
2069 set_float_exception_flags(0, &env->fpu->fp_status);
2070 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2071 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
2072 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
2073 FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
2074 update_fcr31();
2075 }
2076
2077 FLOAT_OP(rsqrt2, d)
2078 {
2079 set_float_exception_flags(0, &env->fpu->fp_status);
2080 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
2081 FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
2082 FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
2083 update_fcr31();
2084 }
2085 FLOAT_OP(rsqrt2, s)
2086 {
2087 set_float_exception_flags(0, &env->fpu->fp_status);
2088 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2089 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
2090 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
2091 update_fcr31();
2092 }
2093 FLOAT_OP(rsqrt2, ps)
2094 {
2095 set_float_exception_flags(0, &env->fpu->fp_status);
2096 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2097 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
2098 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
2099 FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
2100 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
2101 FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
2102 update_fcr31();
2103 }
2104
2105 FLOAT_OP(addr, ps)
2106 {
2107 set_float_exception_flags(0, &env->fpu->fp_status);
2108 FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
2109 FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
2110 update_fcr31();
2111 }
2112
2113 FLOAT_OP(mulr, ps)
2114 {
2115 set_float_exception_flags(0, &env->fpu->fp_status);
2116 FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
2117 FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
2118 update_fcr31();
2119 }
2120
2121 /* compare operations */
2122 #define FOP_COND_D(op, cond) \
2123 void do_cmp_d_ ## op (long cc) \
2124 { \
2125 int c = cond; \
2126 update_fcr31(); \
2127 if (c) \
2128 SET_FP_COND(cc, env->fpu); \
2129 else \
2130 CLEAR_FP_COND(cc, env->fpu); \
2131 } \
2132 void do_cmpabs_d_ ## op (long cc) \
2133 { \
2134 int c; \
2135 FDT0 = float64_abs(FDT0); \
2136 FDT1 = float64_abs(FDT1); \
2137 c = cond; \
2138 update_fcr31(); \
2139 if (c) \
2140 SET_FP_COND(cc, env->fpu); \
2141 else \
2142 CLEAR_FP_COND(cc, env->fpu); \
2143 }
2144
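/* "sig" selects the signaling predicates (the second half of the
 * condition table below): they raise the invalid exception on any NaN
 * operand, while the non-signaling ones raise it only for signaling
 * NaNs. */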
2145 int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
2146 {
2147 if (float64_is_signaling_nan(a) ||
2148 float64_is_signaling_nan(b) ||
2149 (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
2150 float_raise(float_flag_invalid, status);
2151 return 1;
2152 } else if (float64_is_nan(a) || float64_is_nan(b)) {
2153 return 1;
2154 } else {
2155 return 0;
2156 }
2157 }
2158
2159 /* NOTE: the comma operator makes "cond" evaluate to false,
2160 * but float*_is_unordered() is still called for its side effects. */
2161 FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
2162 FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
2163 FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2164 FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2165 FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2166 FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2167 FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
2168 FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
2169 /* NOTE: the comma operator makes "cond" evaluate to false,
2170 * but float*_is_unordered() is still called for its side effects. */
2171 FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
2172 FOP_COND_D(ngle, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
2173 FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2174 FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2175 FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2176 FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2177 FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
2178 FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
2179
2180 #define FOP_COND_S(op, cond) \
2181 void do_cmp_s_ ## op (long cc) \
2182 { \
2183 int c = cond; \
2184 update_fcr31(); \
2185 if (c) \
2186 SET_FP_COND(cc, env->fpu); \
2187 else \
2188 CLEAR_FP_COND(cc, env->fpu); \
2189 } \
2190 void do_cmpabs_s_ ## op (long cc) \
2191 { \
2192 int c; \
2193 FST0 = float32_abs(FST0); \
2194 FST1 = float32_abs(FST1); \
2195 c = cond; \
2196 update_fcr31(); \
2197 if (c) \
2198 SET_FP_COND(cc, env->fpu); \
2199 else \
2200 CLEAR_FP_COND(cc, env->fpu); \
2201 }
2202
2203 flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
2204 {
2205 if (float32_is_signaling_nan(a) ||
2206 float32_is_signaling_nan(b) ||
2207 (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
2208 float_raise(float_flag_invalid, status);
2209 return 1;
2210 } else if (float32_is_nan(a) || float32_is_nan(b)) {
2211 return 1;
2212 } else {
2213 return 0;
2214 }
2215 }
2216
2217 /* NOTE: the comma operator makes "cond" evaluate to false,
2218 * but float*_is_unordered() is still called for its side effects. */
2219 FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
2220 FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
2221 FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
2222 FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
2223 FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
2224 FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
2225 FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
2226 FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
2227 /* NOTE: the comma operator makes "cond" evaluate to false,
2228 * but float*_is_unordered() is still called for its side effects. */
2229 FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
2230 FOP_COND_S(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
2231 FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
2232 FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
2233 FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
2234 FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
2235 FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
2236 FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
2237
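/* Paired-single comparisons evaluate the predicate separately on the
 * lower (condl) and upper (condh) halves, updating condition codes
 * "cc" and "cc + 1" respectively. */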
2238 #define FOP_COND_PS(op, condl, condh) \
2239 void do_cmp_ps_ ## op (long cc) \
2240 { \
2241 int cl = condl; \
2242 int ch = condh; \
2243 update_fcr31(); \
2244 if (cl) \
2245 SET_FP_COND(cc, env->fpu); \
2246 else \
2247 CLEAR_FP_COND(cc, env->fpu); \
2248 if (ch) \
2249 SET_FP_COND(cc + 1, env->fpu); \
2250 else \
2251 CLEAR_FP_COND(cc + 1, env->fpu); \
2252 } \
2253 void do_cmpabs_ps_ ## op (long cc) \
2254 { \
2255 int cl, ch; \
2256 FST0 = float32_abs(FST0); \
2257 FSTH0 = float32_abs(FSTH0); \
2258 FST1 = float32_abs(FST1); \
2259 FSTH1 = float32_abs(FSTH1); \
2260 cl = condl; \
2261 ch = condh; \
2262 update_fcr31(); \
2263 if (cl) \
2264 SET_FP_COND(cc, env->fpu); \
2265 else \
2266 CLEAR_FP_COND(cc, env->fpu); \
2267 if (ch) \
2268 SET_FP_COND(cc + 1, env->fpu); \
2269 else \
2270 CLEAR_FP_COND(cc + 1, env->fpu); \
2271 }
2272
2273 /* NOTE: the comma operator makes "cond" evaluate to false,
2274 * but float*_is_unordered() is still called for its side effects. */
2275 FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
2276 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
2277 FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
2278 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
2279 FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
2280 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2281 FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
2282 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2283 FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
2284 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2285 FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
2286 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2287 FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
2288 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2289 FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
2290 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2291 /* NOTE: the comma operator makes "cond" evaluate to false,
2292 * but float*_is_unordered() is still called for its side effects. */
2293 FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
2294 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
2295 FOP_COND_PS(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
2296 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
2297 FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
2298 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2299 FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
2300 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2301 FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
2302 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2303 FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
2304 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2305 FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
2306 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2307 FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
2308 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))