]> git.proxmox.com Git - mirror_qemu.git/blob - target-alpha/op_helper.c
b8830487efe65d4e156287cfb7eca47f0cc90044
[mirror_qemu.git] / target-alpha / op_helper.c
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 #include "exec.h"
22 #include "host-utils.h"
23 #include "softfloat.h"
24
25 #include "op_helper.h"
26
27 #define MEMSUFFIX _raw
28 #include "op_helper_mem.h"
29
30 #if !defined(CONFIG_USER_ONLY)
31 #define MEMSUFFIX _kernel
32 #include "op_helper_mem.h"
33
34 #define MEMSUFFIX _executive
35 #include "op_helper_mem.h"
36
37 #define MEMSUFFIX _supervisor
38 #include "op_helper_mem.h"
39
40 #define MEMSUFFIX _user
41 #include "op_helper_mem.h"
42
43 /* This is used for pal modes */
44 #define MEMSUFFIX _data
45 #include "op_helper_mem.h"
46 #endif
47
/* Flush the entire emulated TLB (both global and per-process entries).  */
void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}
52
/* Debug helper: dump the effective address of a memory access.
   cpu_dump_EA is defined elsewhere in the target code.  */
void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}
58
59 /*****************************************************************************/
60 /* Exceptions processing helpers */
/* Raise an exception: record the exception index and error code in
   the CPU state, then longjmp back to the main loop.  Does not
   return to the caller.  */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
67
/* AMASK: clear the bits of arg that correspond to architecture
   features the emulated CPU implements (env->amask).  EV4/EV5-class
   chips (IMPLVER_2106x) return the operand unchanged.  */
uint64_t helper_amask (uint64_t arg)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        arg &= ~env->amask;
        break;
    }
    return arg;
}
82
/* RPCC: read the processor cycle counter.  Not implemented yet;
   always returns 0.  */
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}
88
/* IMPLVER: report the implementation version of the emulated CPU.  */
uint64_t helper_load_implver (void)
{
    return env->implver;
}
93
94 void helper_load_fpcr (void)
95 {
96 T0 = 0;
97 #ifdef CONFIG_SOFTFLOAT
98 T0 |= env->fp_status.float_exception_flags << 52;
99 if (env->fp_status.float_exception_flags)
100 T0 |= 1ULL << 63;
101 env->ipr[IPR_EXC_SUM] &= ~0x3E:
102 env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
103 #endif
104 switch (env->fp_status.float_rounding_mode) {
105 case float_round_nearest_even:
106 T0 |= 2ULL << 58;
107 break;
108 case float_round_down:
109 T0 |= 1ULL << 58;
110 break;
111 case float_round_up:
112 T0 |= 3ULL << 58;
113 break;
114 case float_round_to_zero:
115 break;
116 }
117 }
118
/* Write the FPCR from T0: bits 57..52 hold the exception flags,
   bits 59..58 select the dynamic rounding mode.  */
void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
139
140 spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
141
/* RS: atomically read the interrupt/lock flag and set it,
   returning the previous value.  */
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
153
/* RC: atomically read the interrupt/lock flag and clear it,
   returning the previous value.  */
uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
165
/* ADDQ/V: 64-bit add with overflow trap.  Signed overflow occurred
   iff the operands have the same sign and the result's sign differs,
   which is what the xor expression tests on bit 63.  */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
175
/* ADDL/V: 32-bit add with overflow trap, same sign test as ADDQ/V
   but on bit 31.
   NOTE(review): the result is zero-extended here; Alpha longword
   operations canonically sign-extend — confirm the translator
   compensates.  */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
185
/* SUBQ/V: 64-bit subtract with overflow trap.  Subtraction
   overflows iff the operands have different signs and the result's
   sign matches the subtrahend's.  */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 -= op2;
    if (unlikely(((~tmp) ^ op1 ^ (-1ULL)) & ((~tmp) ^ op2) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
195
/* SUBL/V: 32-bit subtract with overflow trap (bit-31 variant of
   SUBQ/V).
   NOTE(review): result is zero-extended, same caveat as ADDL/V.  */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 - op2);
    if (unlikely(((~tmp) ^ op1 ^ (-1UL)) & ((~tmp) ^ op2) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
205
/* MULL/V: 32-bit multiply with overflow trap; result sign-extended.
   NOTE(review): the operands are multiplied as full 64-bit signed
   values, so a 64-bit signed overflow here would be undefined
   behavior; presumably the translator passes sign-extended 32-bit
   values — confirm.  */
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}
215
216 uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
217 {
218 uint64_t tl, th;
219
220 muls64(&tl, &th, op1, op2);
221 /* If th != 0 && th != -1, then we had an overflow */
222 if (unlikely((th + 1) > 1)) {
223 helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
224 }
225 return tl;
226 }
227
/* UMULH: return the high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
235
/* CTPOP: population count (number of set bits).  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* CTLZ: count leading zero bits.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* CTTZ: count trailing zero bits.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
250
/* Clear the bytes of op selected by the 8-bit mask mskb: bit i of
   mskb set means byte i of the result is zeroed.  Multiplying each
   0/1 mask bit by a per-byte 0xFF pattern builds the zap mask
   without branches.  */
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
267
268 uint64_t helper_mskbl(uint64_t val, uint64_t mask)
269 {
270 return byte_zap(val, 0x01 << (mask & 7));
271 }
272
273 uint64_t helper_insbl(uint64_t val, uint64_t mask)
274 {
275 val <<= (mask & 7) * 8;
276 return byte_zap(val, ~(0x01 << (mask & 7)));
277 }
278
279 uint64_t helper_mskwl(uint64_t val, uint64_t mask)
280 {
281 return byte_zap(val, 0x03 << (mask & 7));
282 }
283
284 uint64_t helper_inswl(uint64_t val, uint64_t mask)
285 {
286 val <<= (mask & 7) * 8;
287 return byte_zap(val, ~(0x03 << (mask & 7)));
288 }
289
290 uint64_t helper_mskll(uint64_t val, uint64_t mask)
291 {
292 return byte_zap(val, 0x0F << (mask & 7));
293 }
294
295 uint64_t helper_insll(uint64_t val, uint64_t mask)
296 {
297 val <<= (mask & 7) * 8;
298 return byte_zap(val, ~(0x0F << (mask & 7)));
299 }
300
/* ZAP: clear the bytes of val selected by the low 8 bits of mask.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the bytes of val selected by the low 8 bits of
   mask (clear the complement).  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
310
311 uint64_t helper_mskql(uint64_t val, uint64_t mask)
312 {
313 return byte_zap(val, 0xFF << (mask & 7));
314 }
315
316 uint64_t helper_insql(uint64_t val, uint64_t mask)
317 {
318 val <<= (mask & 7) * 8;
319 return byte_zap(val, ~(0xFF << (mask & 7)));
320 }
321
322 uint64_t helper_mskwh(uint64_t val, uint64_t mask)
323 {
324 return byte_zap(val, (0x03 << (mask & 7)) >> 8);
325 }
326
327 uint64_t helper_inswh(uint64_t val, uint64_t mask)
328 {
329 val >>= 64 - ((mask & 7) * 8);
330 return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
331 }
332
333 uint64_t helper_msklh(uint64_t val, uint64_t mask)
334 {
335 return byte_zap(val, (0x0F << (mask & 7)) >> 8);
336 }
337
338 uint64_t helper_inslh(uint64_t val, uint64_t mask)
339 {
340 val >>= 64 - ((mask & 7) * 8);
341 return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
342 }
343
344 uint64_t helper_mskqh(uint64_t val, uint64_t mask)
345 {
346 return byte_zap(val, (0xFF << (mask & 7)) >> 8);
347 }
348
349 uint64_t helper_insqh(uint64_t val, uint64_t mask)
350 {
351 val >>= 64 - ((mask & 7) * 8);
352 return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
353 }
354
355 uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
356 {
357 uint8_t opa, opb, res;
358 int i;
359
360 res = 0;
361 for (i = 0; i < 7; i++) {
362 opa = op1 >> (i * 8);
363 opb = op2 >> (i * 8);
364 if (opa >= opb)
365 res |= 1 << i;
366 }
367 return res;
368 }
369
/* FCMOV-style helper: copy FT1 into FP register freg when FT0 is
   non-zero.  */
void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}
375
/* SQRTS: IEEE single-precision square root of FT0.  */
void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}
380
/* CPYS: combine the sign bit of FT0 with the exponent and fraction
   of FT1, operating on the raw IEEE bit patterns via union
   type punning.  */
void helper_cpys (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

/* CPYSN: like CPYS but with the sign bit of FT0 negated.  */
void helper_cpysn (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = (~p.i) & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

/* CPYSE: combine the sign AND exponent of FT0 with the fraction
   of FT1.  */
void helper_cpyse (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0xFFF0000000000000ULL;
    r.i |= q.i & ~0xFFF0000000000000ULL;
    FT0 = r.d;
}
422
/* ITOFS: reinterpret the bits in FT0 as a 64-bit integer and
   convert it to single precision.
   NOTE(review): the float32 result is assigned to the double-typed
   FT0 through an implicit C conversion — confirm the register
   representation this relies on.  */
void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}

/* FTOIS: convert the single in FT0 to a 64-bit integer and leave
   the integer's bit pattern in FT0.  */
void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
444
/* SQRTT: IEEE double-precision square root of FT0.  */
void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}
449
/* CMPTUN: set FT0 to 2.0 (raw bits 0x4000000000000000, the Alpha
   "true" value for FP compares) when either operand is a NaN,
   else 0.  */
void helper_cmptun (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

/* CMPTEQ: FT0 = true (2.0) when FT0 == FT1.  */
void helper_cmpteq (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_eq(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

/* CMPTLE: FT0 = true (2.0) when FT0 <= FT1.  */
void helper_cmptle (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_le(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

/* CMPTLT: FT0 = true (2.0) when FT0 < FT1.  */
void helper_cmptlt (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_lt(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}
501
/* ITOFT: reinterpret the bits in FT0 as a 64-bit integer and
   convert it to double precision.  */
void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}

/* FTOIT: convert the double in FT0 to a 64-bit integer and leave
   the integer's bit pattern in FT0.  */
void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
523
/* Validate a VAX F-float operand held as IEEE single bits: a zero
   exponent with the sign bit set or a non-zero mantissa is a VAX
   reserved operand / dirty zero, which is invalid.  */
static always_inline int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}
542
/* Convert a VAX F-float (reinterpreted as IEEE single bits) to a
   true IEEE value.  The VAX exponent bias differs from IEEE's by 2,
   hence the multiply by 0.25; exponents too small to survive the
   rebias are flushed to zero.  */
static always_inline float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}
562
/* Convert an IEEE single back to VAX F-float form (the inverse
   rebias, multiply by 4).  NaN, infinity and overflow are mapped to
   the bit pattern 1 (a VAX "dirty zero"), denormals are scaled by 2.  */
static always_inline float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1;
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1;
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}
596
/* ADDF: VAX F-float add — validate operands, convert to IEEE,
   operate, convert back.  The invalid-operand trap is still TODO
   (execution falls through).  */
void helper_addf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

/* SUBF: VAX F-float subtract, same scheme as ADDF.  */
void helper_subf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

/* MULF: VAX F-float multiply, same scheme as ADDF.  */
void helper_mulf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

/* DIVF: VAX F-float divide, same scheme as ADDF.  */
void helper_divf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

/* SQRTF: VAX F-float square root (FT1 is only validated, not used).  */
void helper_sqrtf (void)
{
    float ft0, ft1;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}

/* ITOFF: integer to F-float transfer — unimplemented.  */
void helper_itoff (void)
{
    /* XXX: TODO */
}
665
/* Validate a VAX G-float operand held as IEEE double bits: a zero
   exponent with the sign bit set or a non-zero mantissa is a VAX
   reserved operand / dirty zero, which is invalid.  */
static always_inline int vaxg_is_valid (double ff)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.f = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}
684
/* Convert a VAX G-float (reinterpreted as IEEE double bits) to a
   true IEEE value: the exponent bias differs by 2, hence the
   multiply by 0.25; exponents too small to survive are flushed
   to zero.  */
static always_inline double vaxg_to_ieee64 (double fg)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint32_t exp;

    p.f = fg;
    exp = (p.i >> 52) & 0x7FF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}
704
705 static always_inline double ieee64_to_vaxg (double fi)
706 {
707 union {
708 double f;
709 uint64_t i;
710 } p;
711 uint64_t mant;
712 uint32_t exp;
713
714 p.f = fi;
715 exp = (p.i >> 52) & 0x7FF;
716 mant = p.i & 0x000FFFFFFFFFFFFFULL;
717 if (exp == 255) {
718 /* NaN or infinity */
719 p.i = 1; /* VAX dirty zero */
720 } else if (exp == 0) {
721 if (mant == 0) {
722 /* Zero */
723 p.i = 0;
724 } else {
725 /* Denormalized */
726 p.f *= 2.0;
727 }
728 } else {
729 if (exp >= 2045) {
730 /* Overflow */
731 p.i = 1; /* VAX dirty zero */
732 } else {
733 p.f *= 4.0;
734 }
735 }
736
737 return p.f;
738 }
739
/* ADDG: VAX G-float add — validate operands, convert to IEEE,
   operate, convert back.  The invalid-operand trap is still TODO.  */
void helper_addg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

/* SUBG: VAX G-float subtract, same scheme as ADDG.  */
void helper_subg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

/* MULG: VAX G-float multiply, same scheme as ADDG.  */
void helper_mulg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

/* DIVG: VAX G-float divide, same scheme as ADDG.  */
void helper_divg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

/* SQRTG: VAX G-float square root (FT1 is only validated, not used).  */
void helper_sqrtg (void)
{
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}
803
/* CMPGEQ: VAX G-float equality compare; FT0 receives the raw bits
   0x4000000000000000 (2.0 = true) or 0 (false).  */
void helper_cmpgeq (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_eq(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

/* CMPGLT: VAX G-float less-than compare.  */
void helper_cmpglt (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_lt(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

/* CMPGLE: VAX G-float less-or-equal compare.  */
void helper_cmpgle (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_le(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}
860
/* CVTQS: convert the quadword integer bits in FT0 to S-float.
   NOTE(review): this treats the bits as unsigned and uses a bare C
   cast rather than int64_to_float32 with FP_STATUS — negative
   inputs and rounding-mode handling look wrong; confirm.  */
void helper_cvtqs (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = (float)p.u;
}

/* CVTTQ: convert the T-float in FT0 to a quadword integer.
   NOTE(review): 'p.u = FT0' is a value-truncating C conversion, not
   float64_to_int64 — no rounding mode or trap handling; confirm
   intended.  */
void helper_cvttq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = FT0;
    FT0 = p.d;
}
882
/* CVTQT: convert the quadword integer bits in FT0 to T-float.
   NOTE(review): 'FT0 = p.u' is a bare unsigned C conversion, not
   int64_to_float64 — negative inputs look mishandled; confirm.  */
void helper_cvtqt (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = p.u;
}

/* CVTQF: convert the quadword integer bits in FT0 to VAX F-float.
   NOTE(review): p.u is narrowed to float by a C conversion before
   the VAX re-encoding; confirm.  */
void helper_cvtqf (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee32_to_vaxf(p.u);
}

/* CVTGF: convert VAX G-float to VAX F-float.
   NOTE(review): the double-to-float narrowing happens via an
   implicit C conversion of the argument, not float64_to_float32,
   so no FP_STATUS rounding/flags apply; confirm.  */
void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}

/* CVTGD: G-float to D-float — unimplemented.  */
void helper_cvtgd (void)
{
    /* XXX: TODO */
}

/* CVTGQ: convert VAX G-float to a quadword integer.
   NOTE(review): the double result is truncated to uint64_t by a C
   conversion — no rounding mode, negative values suspect; confirm.  */
void helper_cvtgq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = vaxg_to_ieee64(FT0);
    FT0 = p.d;
}

/* CVTQG: convert the quadword integer bits in FT0 to VAX G-float.
   NOTE(review): p.u is converted to double by a C conversion,
   treating the bits as unsigned; confirm.  */
void helper_cvtqg (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee64_to_vaxg(p.u);
}

/* CVTDG: D-float to G-float — unimplemented.  */
void helper_cvtdg (void)
{
    /* XXX: TODO */
}
944
/* CVTLQ: gather the longword stored in FP longword format (upper
   two bits at 63..62, lower 30 bits at 58..29) and sign-extend it
   to a quadword.  */
void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= (p.u >> 32);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}
958
/* Common CVTQL body: scatter the low longword of FT0 into the FP
   longword format (bits 31..30 to 63..62, bits 30..0 to 59..29).
   v enables the integer-overflow trap when the input does not fit
   in 32 bits; s (software completion) is unimplemented.
   NOTE(review): the leading '__' makes this a reserved identifier
   in C — consider renaming.  */
static always_inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
}
977
/* CVTQL: no overflow check, no software completion.  */
void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

/* CVTQL/V: trap on integer overflow.  */
void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

/* CVTQL/SV: software completion + overflow trap.  */
void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}
992
/* FP compare helpers leaving a 0/1 boolean in T0.  */

/* T0 = (FT0 == FT1).  */
void helper_cmpfeq (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

/* T0 = (FT0 != FT1).  */
void helper_cmpfne (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

/* T0 = (FT0 < FT1).  */
void helper_cmpflt (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}
1016
1017 void helper_cmpfle (void)
1018 {
1019 if (float64_lt(FT0, FT1, &FP_STATUS))
1020 T0 = 1;
1021 else
1022 T0 = 0;
1023 }
1024
/* T0 = (FT0 > FT1), computed as the negation of (FT0 <= FT1).  */
void helper_cmpfgt (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

/* T0 = (FT0 >= FT1), computed as the negation of (FT0 < FT1).  */
void helper_cmpfge (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}
1040
#if !defined (CONFIG_USER_ONLY)
/* MFPR: read internal processor register iprn into T0.  T0 is left
   unchanged when cpu_alpha_mfpr reports failure.  */
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

/* MTPR: write T0 to internal processor register iprn (the resume-PC
   out parameter is unused here).  */
void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif
1055
#if defined(HOST_SPARC) || defined(HOST_SPARC64)
/* Clear the FT* temporaries explicitly on SPARC hosts — presumably
   because dyngen maps them to fixed host FP registers there; confirm.  */
void helper_reset_FT0 (void)
{
    FT0 = 0;
}

void helper_reset_FT1 (void)
{
    FT1 = 0;
}

void helper_reset_FT2 (void)
{
    FT2 = 0;
}
#endif
1072
1073 /*****************************************************************************/
1074 /* Softmmu support */
1075 #if !defined (CONFIG_USER_ONLY)
1076
1077 /* XXX: the two following helpers are pure hacks.
1078 * Hopefully, we emulate the PALcode, then we should never see
1079 * HW_LD / HW_ST instructions.
1080 */
/* Translate the virtual address in T0 to a host-addressable physical
   address for a READ access via the softmmu TLB, filling the TLB and
   retrying on a miss.  Used to emulate HW_LD.  */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1102
/* Translate the virtual address in T0 to a host-addressable physical
   address for a WRITE access via the softmmu TLB, filling the TLB
   and retrying on a miss.  Used to emulate HW_ST.  */
void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1124
1125 #define MMUSUFFIX _mmu
1126
1127 #define SHIFT 0
1128 #include "softmmu_template.h"
1129
1130 #define SHIFT 1
1131 #include "softmmu_template.h"
1132
1133 #define SHIFT 2
1134 #include "softmmu_template.h"
1135
1136 #define SHIFT 3
1137 #include "softmmu_template.h"
1138
1139 /* try to fill the TLB and return an exception if error. If retaddr is
1140 NULL, it means that the function was called in C code (i.e. not
1141 from generated code or from helper.c) */
1142 /* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        /* Longjmps back to the main loop; note env is not restored
           to saved_env on this path.  */
        cpu_loop_exit();
    }
    env = saved_env;
}
1171
1172 #endif