]> git.proxmox.com Git - qemu.git/blob - target-alpha/op_helper.c
Revert "Get rid of _t suffix"
[qemu.git] / target-alpha / op_helper.c
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "softfloat.h"
23 #include "helper.h"
24
/* Flush the translation-block cache of the current CPU.  */
void helper_tb_flush (void)
{
    tb_flush(env);
}
29
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
/* Raise exception @excp with error code @error: record both in the CPU
   state and longjmp back to the main execution loop.  Does not return.  */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
38
/* Read the processor cycle counter (PCC).  Not implemented yet;
   always returns 0.  */
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}
44
45 uint64_t helper_load_fpcr (void)
46 {
47 uint64_t ret = 0;
48 #ifdef CONFIG_SOFTFLOAT
49 ret |= env->fp_status.float_exception_flags << 52;
50 if (env->fp_status.float_exception_flags)
51 ret |= 1ULL << 63;
52 env->ipr[IPR_EXC_SUM] &= ~0x3E:
53 env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
54 #endif
55 switch (env->fp_status.float_rounding_mode) {
56 case float_round_nearest_even:
57 ret |= 2ULL << 58;
58 break;
59 case float_round_down:
60 ret |= 1ULL << 58;
61 break;
62 case float_round_up:
63 ret |= 3ULL << 58;
64 break;
65 case float_round_to_zero:
66 break;
67 }
68 return ret;
69 }
70
71 void helper_store_fpcr (uint64_t val)
72 {
73 #ifdef CONFIG_SOFTFLOAT
74 set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
75 #endif
76 switch ((val >> 58) & 3) {
77 case 0:
78 set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
79 break;
80 case 1:
81 set_float_rounding_mode(float_round_down, &FP_STATUS);
82 break;
83 case 2:
84 set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
85 break;
86 case 3:
87 set_float_rounding_mode(float_round_up, &FP_STATUS);
88 break;
89 }
90 }
91
/* Protects the per-CPU interrupt flag below.  */
static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

/* RS: atomically read the interrupt flag and set it.  */
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

/* RC: atomically read the interrupt flag and clear it.  */
uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
117
/* ADDQ/V: 64-bit add, raising EXCP_ARITH on signed overflow.  */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    /* Overflow iff both operands have the same sign and the result's
       sign differs from them.  */
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

/* ADDL/V: 32-bit add (result truncated to a longword), raising
   EXCP_ARITH on 32-bit signed overflow.  */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

/* SUBQ/V: 64-bit subtract with signed-overflow trap.  */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    /* Overflow iff the operands have different signs and the result's
       sign differs from the minuend's.  */
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* SUBL/V: 32-bit subtract with signed-overflow trap.  */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* MULL/V: 32-bit multiply; traps when the product does not fit in a
   sign-extended longword.  */
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}
167
168 uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
169 {
170 uint64_t tl, th;
171
172 muls64(&tl, &th, op1, op2);
173 /* If th != 0 && th != -1, then we had an overflow */
174 if (unlikely((th + 1) > 1)) {
175 helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
176 }
177 return tl;
178 }
179
/* UMULH: high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

/* CTPOP: population count.  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* CTLZ: count leading zero bits.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* CTTZ: count trailing zero bits.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
202
/* Zero every byte of op whose bit is set in mskb: bit i of mskb
   selects byte i (little-endian byte numbering).  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t keep = 0;
    int i;

    /* Build the complementary keep-mask: 0xFF for each clear bit.  */
    for (i = 0; i < 8; i++) {
        if (!((mskb >> i) & 1)) {
            keep |= 0xffULL << (i * 8);
        }
    }
    return op & keep;
}
219
/* MSKBL: clear the byte at byte offset (mask & 7).  */
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

/* INSBL: place the low byte of val at byte offset (mask & 7),
   zeroing all other bytes.  */
uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

/* MSKWL: clear the 2-byte word starting at byte offset (mask & 7).  */
uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

/* INSWL: place the low word of val at byte offset (mask & 7).  */
uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

/* MSKLL: clear the 4-byte longword starting at byte offset (mask & 7).  */
uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

/* INSLL: place the low longword of val at byte offset (mask & 7).  */
uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

/* ZAP: clear the bytes selected by the low 8 bits of mask.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the bytes selected by the low 8 bits of mask.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

/* MSKQL: clear the 8 bytes starting at byte offset (mask & 7);
   byte-select bits shifted past bit 7 fall off the uint8_t mask.  */
uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

/* INSQL: place val, shifted to byte offset (mask & 7), into the
   low part of the destination.  */
uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

/* MSKWH: clear the bytes of a word that spilled into the next
   quadword (the ">> 8" keeps only the carried-over select bits).  */
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

/* INSWH: insert the high (spilled-over) part of a word.
   NOTE(review): when (mask & 7) == 0 the shift count is 64, which is
   undefined behavior in C -- verify the translator never emits that
   case, or guard it.  */
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

/* MSKLH: clear the spilled-over bytes of a longword.  */
uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

/* INSLH: insert the spilled-over part of a longword.
   NOTE(review): same potential shift-by-64 as helper_inswh.  */
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

/* MSKQH: clear the spilled-over bytes of a quadword.  */
uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

/* INSQH: insert the spilled-over part of a quadword.
   NOTE(review): same potential shift-by-64 as helper_inswh.  */
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
306
/* CMPBGE: compare the eight bytes of op1 and op2 pairwise (unsigned);
   bit i of the result is set when byte i of op1 >= byte i of op2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        if ((uint8_t)(op1 >> shift) >= (uint8_t)(op2 >> shift)) {
            res |= 1ULL << (shift / 8);
        }
    }
    return res;
}
321
322 /* Floating point helpers */
323
324 /* F floating (VAX) */
325 static inline uint64_t float32_to_f(float32 fa)
326 {
327 uint64_t r, exp, mant, sig;
328 CPU_FloatU a;
329
330 a.f = fa;
331 sig = ((uint64_t)a.l & 0x80000000) << 32;
332 exp = (a.l >> 23) & 0xff;
333 mant = ((uint64_t)a.l & 0x007fffff) << 29;
334
335 if (exp == 255) {
336 /* NaN or infinity */
337 r = 1; /* VAX dirty zero */
338 } else if (exp == 0) {
339 if (mant == 0) {
340 /* Zero */
341 r = 0;
342 } else {
343 /* Denormalized */
344 r = sig | ((exp + 1) << 52) | mant;
345 }
346 } else {
347 if (exp >= 253) {
348 /* Overflow */
349 r = 1; /* VAX dirty zero */
350 } else {
351 r = sig | ((exp + 2) << 52);
352 }
353 }
354
355 return r;
356 }
357
/* Convert the 64-bit register image of a VAX F-float to an IEEE single.
   Reserved operands (dirty zero) raise EXCP_OPCDEC.  */
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        /* Remove the +2 exponent bias applied by float32_to_f.  */
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
380
/* Repack the 64-bit F-float register image into the 32-bit VAX
   memory layout (word-swapped fields).  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r  = ((uint32_t)(a >> 13)) & 0xffff0000;  /* bits <44:29> -> <31:16> */
    r |= ((uint32_t)(a >> 45)) & 0x00003fff;  /* bits <58:45> -> <13:0> */
    r |= ((uint32_t)(a >> 48)) & 0x0000c000;  /* bits <63:62> -> <15:14> */
    return r;
}
389
/* Expand the 32-bit VAX F-float memory layout into the 64-bit
   register image, widening the exponent field when the number is
   not a true zero/dirty-zero form.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t exp_sign = ((uint64_t)(a & 0x0000c000)) << 48;
    uint64_t mant_hi  = ((uint64_t)(a & 0x003fffff)) << 45;
    uint64_t mant_lo  = ((uint64_t)(a & 0xffff0000)) << 13;
    uint64_t r = exp_sign | mant_hi | mant_lo;

    if ((a & 0x00004000) == 0) {
        r |= 0x7ll << 59;
    }
    return r;
}
400
/* ADDF: VAX F-float add.  */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SUBF: VAX F-float subtract.  */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* MULF: VAX F-float multiply.  */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* DIVF: VAX F-float divide.  */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SQRTF: VAX F-float square root.  */
uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
449
450
451 /* G floating (VAX) */
452 static inline uint64_t float64_to_g(float64 fa)
453 {
454 uint64_t r, exp, mant, sig;
455 CPU_DoubleU a;
456
457 a.d = fa;
458 sig = a.ll & 0x8000000000000000ull;
459 exp = (a.ll >> 52) & 0x7ff;
460 mant = a.ll & 0x000fffffffffffffull;
461
462 if (exp == 2047) {
463 /* NaN or infinity */
464 r = 1; /* VAX dirty zero */
465 } else if (exp == 0) {
466 if (mant == 0) {
467 /* Zero */
468 r = 0;
469 } else {
470 /* Denormalized */
471 r = sig | ((exp + 1) << 52) | mant;
472 }
473 } else {
474 if (exp >= 2045) {
475 /* Overflow */
476 r = 1; /* VAX dirty zero */
477 } else {
478 r = sig | ((exp + 2) << 52);
479 }
480 }
481
482 return r;
483 }
484
/* Convert the register image of a VAX G-float to an IEEE double.
   Reserved operands (dirty zero) raise EXCP_OPCDEC.  */
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        /* Remove the +2 exponent bias applied by float64_to_g.  */
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
507
/* Swap the four 16-bit words of the G-float register image into the
   VAX memory ordering (full word reversal).  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t w0 = (a >> 48) & 0xffffull;
    uint64_t w1 = (a >> 32) & 0xffffull;
    uint64_t w2 = (a >> 16) & 0xffffull;
    uint64_t w3 = a & 0xffffull;

    return (w3 << 48) | (w2 << 32) | (w1 << 16) | w0;
}
517
/* Inverse of helper_g_to_memory; the word swap is self-inverse, so
   the same permutation converts memory layout back to register form.  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t swapped = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        swapped |= ((a >> shift) & 0xffffull) << (48 - shift);
    }
    return swapped;
}
527
/* ADDG: VAX G-float add.  */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SUBG: VAX G-float subtract.  */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* MULG: VAX G-float multiply.  */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* DIVG: VAX G-float divide.  */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SQRTG: VAX G-float square root.  */
uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
576
577
/* S floating (single) */
/* Expand an IEEE single into the canonical 64-bit S-float register
   image (exponent widened; the 0x7 fill reproduces the hardware's
   exponent mapping for finite numbers with a clear high exponent bit).  */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

/* Collapse the 64-bit S-float register image back to an IEEE single.  */
static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}
598
599 uint32_t helper_s_to_memory (uint64_t a)
600 {
601 /* Memory format is the same as float32 */
602 float32 fa = s_to_float32(a);
603 return *(uint32_t*)(&fa);
604 }
605
606 uint64_t helper_memory_to_s (uint32_t a)
607 {
608 /* Memory format is the same as float32 */
609 return float32_to_s(*(float32*)(&a));
610 }
611
/* ADDS: IEEE single add.  */
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SUBS: IEEE single subtract.  */
uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* MULS: IEEE single multiply.  */
uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* DIVS: IEEE single divide.  */
uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SQRTS: IEEE single square root.  */
uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
660
661
/* T floating (double) */
/* T-float register image -> IEEE double (bit-identical layouts).  */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

/* IEEE double -> T-float register image (bit-identical layouts).  */
static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
678
/* ADDT: IEEE double add.  */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SUBT: IEEE double subtract.  */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* MULT: IEEE double multiply.  */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* DIVT: IEEE double divide.  */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SQRTT: IEEE double square root.  */
uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
727
728
729 /* Sign copy */
/* CPYS: copy the sign bit of a onto the exponent/fraction of b.  */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    const uint64_t sign_bit = 1ULL << 63;

    return (a & sign_bit) | (b & ~sign_bit);
}
734
/* CPYSN: copy the negated sign bit of a onto b.  */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    const uint64_t sign_bit = 1ULL << 63;

    return (~a & sign_bit) | (b & ~sign_bit);
}
739
/* CPYSE: copy sign and exponent (bits <63:52>) of a onto b.  */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    const uint64_t sign_exp = 0xFFF0000000000000ULL;

    return (a & sign_exp) | (b & ~sign_exp);
}
744
745
746 /* Comparisons */
/* CMPTUN: T-float unordered compare; "true" is encoded as 2.0's bit
   pattern (0x4000000000000000), per the Alpha FP compare convention.  */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTEQ: T-float equality compare.  */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLE: T-float less-or-equal compare.  */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLT: T-float less-than compare.  */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGEQ: G-float equality compare.  */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLE: G-float less-or-equal compare.  */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLT: G-float less-than compare.  */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
837
/* True (1) iff a is +0 or -0: all bits except the sign are clear.  */
uint64_t helper_cmpfeq (uint64_t a)
{
    return (a << 1) == 0;
}
842
/* Nonzero iff a is not +/-0; returns the magnitude bits themselves.  */
uint64_t helper_cmpfne (uint64_t a)
{
    return a & ~(1ULL << 63);
}
847
/* True iff a < 0: sign set and magnitude nonzero (-0 is not < 0).  */
uint64_t helper_cmpflt (uint64_t a)
{
    return (a >> 63) && ((a << 1) != 0);
}
852
/* True iff a <= 0: sign set, or the value is +/-0.  */
uint64_t helper_cmpfle (uint64_t a)
{
    return (a >> 63) || ((a << 1) == 0);
}
857
/* True iff a > 0: sign clear and magnitude nonzero.  */
uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a >> 63) && ((a << 1) != 0);
}
862
/* True iff a >= 0: sign clear, or the value is +/-0.  */
uint64_t helper_cmpfge (uint64_t a)
{
    return !(a >> 63) || ((a << 1) == 0);
}
867
868
/* Floating point format conversion */

/* CVTTS: IEEE double -> IEEE single.  */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTST: IEEE single -> IEEE double.  */
uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQS: signed quadword -> IEEE single.  */
uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTTQ: IEEE double -> signed quadword, truncating toward zero.  */
uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQT: signed quadword -> IEEE double.  */
uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQF: signed quadword -> VAX F-float.  */
uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGF: VAX G-float -> VAX F-float (via IEEE intermediates).  */
uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGQ: VAX G-float -> signed quadword, truncating toward zero.  */
uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQG: signed quadword -> VAX G-float.  */
uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}
936
/* CVTLQ: unpack the in-register longword format (result bits <31:30>
   come from source bits <63:62>, bits <29:0> from <58:29>) and
   sign-extend to 64 bits.  The high source field must be masked to
   0xC0000000 before merging; without the mask, unrelated source bits
   <61:32> leaked into the low part of the result.  */
uint64_t helper_cvtlq (uint64_t a)
{
    uint32_t r32 = ((uint32_t)(a >> 32) & 0xC0000000u)
                 | ((uint32_t)(a >> 29) & 0x3FFFFFFFu);

    return (uint64_t)(int64_t)(int32_t)r32;
}
941
/* Shared body for the CVTQL variants: repack the low 32 bits of a
   into the in-register longword format (bits <31:30> -> <63:62>,
   bits <30:0> -> <59:29>).  @v enables the integer-overflow trap,
   @s selects software completion (not implemented).  */
static inline uint64_t __helper_cvtql(uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

/* CVTQL: no trapping.  */
uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

/* CVTQL/V: trap on integer overflow.  */
uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

/* CVTQL/SV: software completion + overflow trap.  */
uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
972
/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
/* HW_REI: return from PALcode using the saved exception address;
   bit 0 of IPR_EXC_ADDR is preserved as the PAL-mode flag.  */
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

/* HW_RET: return from PALcode to the address in a register.  */
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

/* MFPR: read internal processor register @iprn; on failure the
   previous value @val is returned unchanged.  */
uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

/* MTPR: write internal processor register @iprn.  */
void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

/* Switch the current mode bits of PS to the alternate mode, saving
   the old mode for helper_restore_mode.  */
void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

/* Restore the mode bits saved by helper_set_alt_mode.  */
void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif
1016
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 * Hopefully, we emulate the PALcode, then we should never see
 * HW_LD / HW_ST instructions.
 */
/* Translate a virtual address to a physical one through the softmmu
   TLB for a read access, filling the TLB on a miss.  */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

/* Same as above, for a write access.  */
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
1068
/* Raw (physical) longword load; operand order follows the two-operand
   ld*_raw macros from exec.h.  */
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

/* Raw quadword load.  */
void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

/* LDL_L: load-locked longword -- remember the locked address, then load.  */
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}
1084
1085 void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
1086 {
1087 env->lock = t1;
1088 ldl_raw(t1, t0);
1089 }
1090
/* Kernel-mode longword load.  */
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

/* Kernel-mode quadword load.  */
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

/* Current-mode (data) longword load.  */
void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

/* Current-mode (data) quadword load.  */
void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

/* Raw longword store.  */
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

/* Raw quadword store.  */
void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}
1120
/* STL_C: store-conditional longword.  Succeeds (returns 0) only if
   the lock address recorded by the matching load-locked still equals
   t1; returns 1 otherwise.  */
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    /* NOTE(review): 1 looks like a never-matching sentinel that
       invalidates the lock (real addresses are aligned) -- verify.  */
    env->lock = 1;

    return ret;
}

/* STQ_C: store-conditional quadword; same protocol as STL_C.  */
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}
1150
1151 #define MMUSUFFIX _mmu
1152
1153 #define SHIFT 0
1154 #include "softmmu_template.h"
1155
1156 #define SHIFT 1
1157 #include "softmmu_template.h"
1158
1159 #define SHIFT 2
1160 #include "softmmu_template.h"
1161
1162 #define SHIFT 3
1163 #include "softmmu_template.h"
1164
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault; roll the CPU state back to the
                   faulting instruction before raising the exception. */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1197
1198 #endif