/*
 * Helpers for floating point instructions.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "helper.h"
#include "fpu/softfloat.h"

#define FP_STATUS (env->fp_status)

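/* Called from generated code to configure the softfloat status.  VAL is
   passed straight through, so the translator is assumed to supply
   softfloat's own encodings (float_round_* for the rounding mode, a
   simple flag for flush-to-zero).  */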
void helper_setroundmode(CPUAlphaState *env, uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero(CPUAlphaState *env, uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear(CPUAlphaState *env)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get(CPUAlphaState *env)
{
    return get_float_exception_flags(&FP_STATUS);
}

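/* Map softfloat exception flags onto the Alpha EXC_M_* summary bits and,
   if any are set, raise an arithmetic trap with REGNO recorded in the
   register-write mask.  */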
static inline void inline_fp_exc_raise(CPUAlphaState *env, uintptr_t retaddr,
                                       uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }

        arith_excp(env, retaddr, hw_exc, 1ull << regno);
    }
}

/* Raise exceptions for IEEE fp insns without software completion.
   In that case every exception traps; the FPCR exception mask does
   not apply.  */
void helper_fp_exc_raise(CPUAlphaState *env, uint32_t exc, uint32_t regno)
{
    inline_fp_exc_raise(env, GETPC(), exc, regno);
}

/* Raise exceptions for IEEE fp insns with software completion.  */
void helper_fp_exc_raise_s(CPUAlphaState *env, uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;
        exc &= ~env->fpcr_exc_mask;
        inline_fp_exc_raise(env, GETPC(), exc, regno);
    }
}

/* Input handling without software completion.  Trap for all
   non-finite numbers.  */
void helper_ieee_input(CPUAlphaState *env, uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        /* Denormals without DNZ set raise an exception.  */
        if (frac != 0 && !env->fp_status.flush_inputs_to_zero) {
            arith_excp(env, GETPC(), EXC_M_UNF, 0);
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        arith_excp(env, GETPC(), frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
void helper_ieee_input_cmp(CPUAlphaState *env, uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        /* Denormals without DNZ set raise an exception.  */
        if (frac != 0 && !env->fp_status.flush_inputs_to_zero) {
            arith_excp(env, GETPC(), EXC_M_UNF, 0);
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        arith_excp(env, GETPC(), EXC_M_INV, 0);
    }
}

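/* The VAX helpers below work on the 64-bit register image of F and G
   floating values: sign in bit 63, an 11-bit exponent in bits <62:52>,
   and the fraction left-justified below that.  The routines that follow
   convert between this image, IEEE softfloat types, and the in-memory
   byte layouts.  */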
/* F floating (VAX) */
static uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static float32 f_to_float32(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

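/* F floating values are word-swapped in memory (VAX convention).  The two
   helpers below shuffle between the 64-bit register image and the 32-bit
   memory form; memory_to_f also widens the 8-bit memory exponent back
   into the 11-bit register exponent field.  */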
uint32_t helper_f_to_memory(uint64_t a)
{
    uint32_t r;
    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f(uint32_t a)
{
    uint64_t r;
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(env, GETPC(), t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}

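/* G floating uses the same register layout with a full 11-bit exponent.
   VAX treats the significand as a fraction in [0.5, 1) where IEEE uses
   [1, 2), and the exponent bias differs by one, which is why the
   conversions below adjust the exponent by +/-2.  */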
/* G floating (VAX) */
static uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static float64 g_to_float64(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

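/* G floating values are stored in memory with their four 16-bit words
   reversed.  The swap is its own inverse, which is why the two helpers
   below are identical.  */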
uint64_t helper_g_to_memory(uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g(uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(env, GETPC(), a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

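/* S floating operands live in registers widened to the T floating (IEEE
   double) layout; the conversions below expand the 8-bit exponent to 11
   bits and left-justify the fraction, matching the register format the
   hardware uses for S floating operands.  */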
/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;
        }
    } else {
        if (exp_low != 0x00) {
            exp |= 0x380;
        }
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory(uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s(uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts(CPUAlphaState *env, uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt(CPUAlphaState *env, uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
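/* The comparison helpers return the canonical Alpha "true" result, which
   is 2.0 in T floating format (0x4000000000000000), or zero for false.  */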
uint64_t helper_cmptun(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpteq(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmptle(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmptlt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);

    if (float64_le(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);

    if (float64_lt(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

/* Floating point format conversion */
uint64_t helper_cvtts(CPUAlphaState *env, uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst(CPUAlphaState *env, uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs(CPUAlphaState *env, uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

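/* For the rounding cases below, the bits shifted out of the fraction are
   left-justified into ROUND, so round-to-nearest-even only has to compare
   against 1ull << 63 (an exactly-halfway remainder).  */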
static inline uint64_t inline_cvttq(CPUAlphaState *env, uint64_t a,
                                    int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

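/* helper_cvttq uses the current dynamic rounding mode with overflow and
   inexact reporting enabled; the _c variant truncates and suppresses
   those indications, and _svic truncates but keeps reporting them,
   presumably for the software-completion (/SVIC) form.  */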
uint64_t helper_cvttq(CPUAlphaState *env, uint64_t a)
{
    return inline_cvttq(env, a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(CPUAlphaState *env, uint64_t a)
{
    return inline_cvttq(env, a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(CPUAlphaState *env, uint64_t a)
{
    return inline_cvttq(env, a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt(CPUAlphaState *env, uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(env, GETPC(), a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
{
    float64 fa = g_to_float64(env, GETPC(), a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}