/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NaN. */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand. */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand. */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
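/*
 * Worked example (illustrative, not in the original file): float32 1.0f is
 * 0x3f800000 and takes the "normalized" path above.  Bits 30-31 contribute
 * nothing, bit 30 is 0 so the widening term inserts 0b111 at bits 59-61,
 * and the low 30 bits shift up by 29, producing 0x3ff0000000000000, which
 * is exactly float64 1.0.
 */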
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN). */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result. */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
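/*
 * Worked example (illustrative, not in the original file): float64 1.0 is
 * 0x3ff0000000000000, so exp = 0x3ff = 1023 > 896 and the fast path packs
 * bits 62-63 and bits 29-58 into 0x3f800000, i.e. float32 1.0, the exact
 * inverse of the helper_todouble() example above.
 */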
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
/* Classify a floating-point number. */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
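/*
 * Example (illustrative, not in the original file): float32_classify() of
 * -0.0f (0x80000000) returns is_zero | is_neg, and an sNaN such as
 * 0x7f800001 returns is_snan.  Exactly one of the six class bits is set,
 * which is what lets set_fprf_from_class() below index its table with
 * ctz32(class).
 */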
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~(0x1F << FPSCR_FPRF);
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
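/*
 * Example (illustrative, not in the original file):
 * helper_compute_fprf_float64() on 1.0 classifies it as positive
 * normalized, so FPSCR[FPRF] becomes 0x04; on -1.0 the second table
 * column applies and FPRF becomes 0x08.
 */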
/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    }
}

/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXCVI;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
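/*
 * Note (illustrative, not in the original file): fpscr_rn is the two-bit
 * RN field in the low bits of the FPSCR, so e.g. RN = 0b01 selects
 * float_round_to_zero here, matching the Power ISA encoding of the
 * rounding-mode control.
 */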
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Set the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = env_cpu(env);
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Set VX bit to one */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr, int classes)
{
    if ((classes & ~is_neg) == is_inf) {
        /* Magnitude subtraction of infinities */
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}

float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}

static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
        /* Multiplication of zero by infinity */
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float64_classify(arg1) |
                             float64_classify(arg2));
    }

    return ret;
}

static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    classes &= ~is_neg;
    if (classes == is_inf) {
        /* Division of infinity by infinity */
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (classes == is_zero) {
        /* Division of zero by zero */
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            float_invalid_op_div(env, 1, GETPC(),
                                 float64_classify(arg1) |
                                 float64_classify(arg2));
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}

static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int status = get_float_exception_flags(&env->fp_status);           \
                                                                       \
    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            ret = nanval;                                              \
        }                                                              \
        do_float_check_status(env, GETPC());                           \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
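/*
 * Example (illustrative, not in the original file): fctidz on a NaN input
 * raises float_flag_invalid, so the nanval argument above replaces the
 * result with 0x8000000000000000, the Power ISA saturation value for an
 * invalid signed conversion.
 */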
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
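/*
 * Example (illustrative, not in the original file): frin rounds ties away
 * from zero, so helper_frin() maps 2.5 to 3.0 and -2.5 to -3.0, while
 * helper_friz() truncates both to 2.0 and -2.0.
 */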
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}

FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)

#define FPU_FMADD(op, madd_flags)                              \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,          \
                     uint64_t arg2, uint64_t arg3)             \
{                                                              \
    uint32_t flags;                                            \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \
                                 &env->fp_status);             \
    flags = get_float_exception_flags(&env->fp_status);        \
    if (flags) {                                               \
        if (flags & float_flag_invalid) {                      \
            float64_maddsub_update_excp(env, arg1, arg2, arg3, \
                                        madd_flags, GETPC());  \
        }                                                      \
        do_float_check_status(env, GETPC());                   \
    }                                                          \
    return ret;                                                \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
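/*
 * Example (illustrative, not in the original file): NMSUB_FLGS combines
 * float_muladd_negate_c with float_muladd_negate_result, so
 * helper_fnmsub() computes -((arg1 * arg2) - arg3) in a single fused
 * rounding step, the Power ISA fnmsub semantics.
 */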
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_vxsnan(env, GETPC());
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_vxsqrt(env, 1, GETPC());
        }
    }

    return ret;
}

float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2. */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}

uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero. */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}

uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
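/*
 * Example (illustrative, not in the original file): fsel implements
 * "arg1 >= 0.0 ? arg2 : arg3" without trapping; arg1 = -0.0 still selects
 * arg2 because negative zero counts as zero above, while a NaN arg1
 * selects arg3.
 */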
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
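/*
 * Note (illustrative, not in the original file): the 4-bit value returned
 * by helper_ftdiv() and helper_ftsqrt() is written to a CR field by the
 * translator.  0x8 is always set, 0x4 reports the fg test (denormal
 * operand) and 0x2 the fe test (estimate unsafe), so a normal,
 * safely-ranged input yields just 0x8.
 */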
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                        \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
    {                                                       \
        return e##name(env, val);                           \
    }

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                         \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
    {                                                        \
        return ((uint64_t)e##name(env, val >> 32) << 32) |   \
               (uint64_t)e##name(env, val);                  \
    }

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
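/*
 * Example (illustrative, not in the original file): the SPE vector forms
 * run the scalar conversion on each 32-bit half independently, so
 * helper_evfscfsi() on 0x00000001ffffffff converts 1 in the high half and
 * -1 in the low half, yielding packed float32 1.0 and -1.0.
 */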
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
               (uint64_t)e##name(env, op1, op2);                           \
    }

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
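/*
 * Worked example (illustrative, not in the original file): with t0 = 1
 * (high element true) and t1 = 0, evcmp_merge() returns 0b1010: bit 3 is
 * the high result, bit 2 the low result, bit 1 their OR ("any") and bit 0
 * their AND ("all"), the CR field layout the SPE vector compares expect.
 */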
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double precision floating point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
                                    tp##_classify(xa->fld) |                 \
                                    tp##_classify(xb->fld));                 \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
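/*
 * Note (illustrative, not in the original file): the r2sp variants
 * (xsaddsp, xssubsp) compute in double precision and then round through
 * helper_frsp(), so the stored VsrD(0) is a double holding a
 * single-precision-rounded value, which is how the VSX scalar
 * single-precision operations are defined.
 */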
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa->f128) |
                                float128_classify(xb->f128));
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, GETPC());
}

/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, sfprf, GETPC(),                        \
                                 tp##_classify(xa->fld) |                    \
                                 tp##_classify(xb->fld));                    \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float128_classify(xa->f128) |
                             float128_classify(xb->f128));
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, GETPC());
}

/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_div(env, sfprf, GETPC(),                         \
                                 tp##_classify(xa->fld) |                     \
                                 tp##_classify(xb->fld));                     \
        }                                                                     \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
            float_zero_divide_excp(env, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = helper_frsp(env, t.fld);                                  \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, 1, GETPC(),
                             float128_classify(xa->f128) |
                             float128_classify(xb->f128));
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)         \
{                                                                        \
    ppc_vsr_t t = *xt;                                                   \
    int i;                                                               \
                                                                         \
    helper_reset_fpstatus(env);                                          \
                                                                         \
    for (i = 0; i < nels; i++) {                                         \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC());                       \
        }                                                                \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);            \
                                                                         \
        if (r2sp) {                                                      \
            t.fld = helper_frsp(env, t.fld);                             \
        }                                                                \
                                                                         \
        if (sfprf) {                                                     \
            helper_compute_fprf_float64(env, t.fld);                     \
        }                                                                \
    }                                                                    \
                                                                         \
    *xt = t;                                                             \
    do_float_check_status(env, GETPC());                                 \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /*                                                                \
             * Avoid double rounding errors by rounding the intermediate     \
             * result to odd.                                                 \
             */                                                               \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
            t.fld |= (get_float_exception_flags(&tstat) &                     \
                      float_flag_inexact) != 0;                               \
        } else {                                                              \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa->fld, b->fld,                    \
                                     c->fld, maddflgs, GETPC());              \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = helper_frsp(env, t.fld);                                  \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
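/*
 * Note (illustrative, not in the original file): the
 * round-to-zero-plus-sticky sequence above implements round-to-odd: the
 * inexact flag is OR'ed into the last mantissa bit of the truncated
 * intermediate, so the later rounding to single precision in
 * helper_frsp() cannot double-round.
 */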
/*
 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                         \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                            \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;        \
                                                                          \
    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                               \
        if (fpscr_ve == 0 && svxvc) {                                     \
            vxvc_flag = true;                                             \
        }                                                                 \
    } else if (svxvc) {                                                   \
        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
                    float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);   \
    }                                                                     \
    if (vxsnan_flag) {                                                    \
        float_invalid_op_vxsnan(env, GETPC());                            \
    }                                                                     \
    if (vxvc_flag) {                                                      \
        float_invalid_op_vxvc(env, 0, GETPC());                           \
    }                                                                     \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                    \
                                                                          \
    if (!vex_flag) {                                                      \
        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                       \
                          &env->fp_status) == exp) {                      \
            t.VsrD(0) = -1;                                               \
            t.VsrD(1) = 0;                                                \
        } else {                                                          \
            t.VsrD(0) = 0;                                                \
            t.VsrD(1) = 0;                                                \
        }                                                                 \
    }                                                                     \
    *xt = t;                                                              \
    do_float_check_status(env, GETPC());                                  \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)

void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 52, 11);
    exp_b = extract64(xb->VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
                 float64_is_any_nan(xb->VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 48, 15);
    exp_b = extract64(xb->VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa->f128) ||
                 float128_is_any_nan(xb->f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                           \
{                                                                        \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
                                                                         \
    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||        \
        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {        \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
               float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {     \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_vxsnan(env, GETPC());                           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_vxvc(env, 0, GETPC());                          \
    }                                                                    \
                                                                         \
    if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {         \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    do_float_check_status(env, GETPC());                                 \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)

#define VSX_SCALAR_CMPQ(op, ordered)                                    \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    uint32_t cc = 0;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false;                        \
                                                                        \
    helper_reset_fpstatus(env);                                         \
                                                                        \
    if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||         \
        float128_is_signaling_nan(xb->f128, &env->fp_status)) {         \
        vxsnan_flag = true;                                             \
        cc = CRF_SO;                                                    \
        if (fpscr_ve == 0 && ordered) {                                 \
            vxvc_flag = true;                                           \
        }                                                               \
    } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||      \
               float128_is_quiet_nan(xb->f128, &env->fp_status)) {      \
        cc = CRF_SO;                                                    \
        if (ordered) {                                                  \
            vxvc_flag = true;                                           \
        }                                                               \
    }                                                                   \
    if (vxsnan_flag) {                                                  \
        float_invalid_op_vxsnan(env, GETPC());                          \
    }                                                                   \
    if (vxvc_flag) {                                                    \
        float_invalid_op_vxvc(env, 0, GETPC());                         \
    }                                                                   \
                                                                        \
    if (float128_lt(xa->f128, xb->f128, &env->fp_status)) {             \
        cc |= CRF_LT;                                                   \
    } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) {     \
        cc |= CRF_GT;                                                   \
    } else {                                                            \
        cc |= CRF_EQ;                                                   \
    }                                                                   \
                                                                        \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
    env->fpscr |= cc << FPSCR_FPRF;                                     \
    env->crf[BF(opcode)] = cc;                                          \
                                                                        \
    do_float_check_status(env, GETPC());                                \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
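
/*
 * The ordered compares (xscmpodp/xscmpoqp) also raise VXVC when an
 * operand is a NaN (for a signaling NaN only if VE is clear); the
 * unordered forms flag VXSNAN alone.
 */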

/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name - instruction mnemonic
 *   op   - operation (max or min)
 *   nels - number of elements (1, 2 or 4)
 *   tp   - type (float32 or float64)
 *   fld  - vsr_t field (VsrD(*) or VsrW(*))
 */

#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
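
/*
 * softfloat's maxnum/minnum implement the IEEE 754-2008 maxNum/minNum
 * semantics: when exactly one operand is a quiet NaN the other operand
 * is returned, so only the signaling-NaN case needs flagging here.
 */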

#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode,                        \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
                 float64_is_any_nan(xb->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}                                                                             \

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
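
/*
 * xsmaxcdp/xsmincdp implement the "C minimum/maximum" semantics: any NaN
 * operand simply selects src2 (xb), matching the usual (a op b) ? a : b
 * idiom rather than the IEEE maxNum rules above.
 */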

#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode,                        \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
               float64_is_zero(xb->VsrD(0))) {                                \
        if (max) {                                                            \
            if (!float64_is_neg(xa->VsrD(0)) ||                               \
                !float64_is_neg(xb->VsrD(0))) {                               \
                t.VsrD(0) = 0ULL;                                             \
            } else {                                                          \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa->VsrD(0)) ||                                \
                float64_is_neg(xb->VsrD(0))) {                                \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            } else {                                                          \
                t.VsrD(0) = 0ULL;                                             \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
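
/*
 * The "J" forms differ from the "C" forms above in that a NaN in xa wins
 * over one in xb, and signed zeros are ordered: +0.0 is treated as larger
 * than -0.0 when both operands are zero.
 */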

/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */

#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                    \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                       \
{                                                                        \
    ppc_vsr_t t = *xt;                                                   \
    uint32_t crf6 = 0;                                                   \
    int i;                                                               \
    int all_true = 1;                                                    \
    int all_false = 1;                                                   \
                                                                         \
    for (i = 0; i < nels; i++) {                                         \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                         \
                     tp##_is_any_nan(xb->fld))) {                        \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {       \
                float_invalid_op_vxsnan(env, GETPC());                   \
            }                                                            \
            if (svxvc) {                                                 \
                float_invalid_op_vxvc(env, 0, GETPC());                  \
            }                                                            \
            t.fld = 0;                                                   \
            all_true = 0;                                                \
        } else {                                                         \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {  \
                t.fld = -1;                                              \
                all_false = 0;                                           \
            } else {                                                     \
                t.fld = 0;                                               \
                all_true = 0;                                            \
            }                                                            \
        }                                                                \
    }                                                                    \
                                                                         \
    *xt = t;                                                             \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                 \
    return crf6;                                                         \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
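
/*
 * The returned CR6-style value encodes the summary result: 0x8 when the
 * predicate held in every lane, 0x2 when it held in none (NaN lanes
 * count as false), and 0 otherwise.
 */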

/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */

#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)  \
{                                                                  \
    ppc_vsr_t t = *xt;                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)

/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */

#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode,                   \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                         \
{                                                                      \
    ppc_vsr_t t = *xt;                                                 \
    int i;                                                             \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);            \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                  \
                                            &env->fp_status))) {       \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_##ttp(env, t.tfld);                    \
        }                                                              \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, GETPC());                               \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */

#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)  \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
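
/*
 * The extra '1' argument passed to the float16 conversion routines
 * selects the IEEE half-precision layout (ieee = true) rather than the
 * ARM alternative encoding.
 */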

/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */

void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, GETPC());
}

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    uint64_t result;

    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    result = (uint64_t)float64_to_float32(xb, &tstat);
    /* hardware replicates result to both words of the doubleword result. */
    return (result << 32) | result;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}

/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */

#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));    \
            t.tfld = rnan;                                                   \
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
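
/*
 * rnan supplies the value written back when the conversion is invalid
 * (NaN input or out-of-range result): the most negative integer for
 * signed targets, zero for unsigned ones.
 */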

/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */

#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
                                                                             \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));        \
        t.tfld = rnan;                                                       \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)

/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */

#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            t.tfld = helper_frsp(env, t.tfld);                          \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)

/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op   - instruction mnemonic
 *   stp  - source type (int32, uint32, int64 or uint64)
 *   ttp  - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 */

#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
                                                                        \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, t.tfld);                             \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
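
/*
 * This works because the softfloat rounding-mode enums are small distinct
 * constants (nearest_even, down, up and to_zero are 0..3 at the time of
 * writing), so their sum (6) matches none of the defined modes, including
 * ties_away and to_odd.
 */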

/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */

#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                      \
    ppc_vsr_t t = *xt;                                                 \
    int i;                                                             \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR                                                 \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, GETPC());                               \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
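
/*
 * The plain forms (xsrdpi etc.) round to nearest with ties away from
 * zero; the "c" forms use the rounding mode currently in FPSCR and are
 * the only ones allowed to leave the inexact (XX) flag set.
 */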

uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, GETPC());
    return xt;
}

#define VSX_XXPERM(op, indexed)                                        \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
{                                                                     \
    ppc_vsr_t t = *xt;                                                \
    int i, idx;                                                       \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv->VsrB(i) & 0x1F;                                    \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
                                : xt->VsrB(idx - 16);                 \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
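
/*
 * xxpermr (indexed = 1) complements each selector (31 - idx), picking
 * bytes from the opposite end of the concatenated {xa, xt} source.
 */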

void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint32_t exp, i, fraction;

    for (i = 0; i < 4; i++) {
        exp = (xb->VsrW(i) >> 23) & 0xFF;
        fraction = xb->VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            t.VsrW(i) = fraction | 0x00800000;
        } else {
            t.VsrW(i) = fraction;
        }
    }
    *xt = t;
}
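
/*
 * xvxsigsp extracts the significand of each word: normal numbers get the
 * implicit leading one (0x00800000) made explicit, while zeros, denormals,
 * infinities and NaNs (exp 0 or 255) yield just the raw fraction bits.
 */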

/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */

#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                        \
    ppc_vsr_t *xb = &env->vsr[xbn];                               \
    ppc_vsr_t t = { };                                            \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    if (!scrf) {                                                  \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        t = *xt;                                                  \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb->fld);                              \
        if (tp##_is_any_nan(xb->fld)) {                           \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb->fld)) {                   \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb->fld)) {                       \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {           \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                  \
            env->fpscr |= cc << FPSCR_FPRF;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            t.tfld = match ? fld_max : 0;                         \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        *xt = t;                                                  \
    }                                                             \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
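
/*
 * DCMX bit assignment tested above: bit 6 = NaN, bits 5/4 = +/-Infinity,
 * bits 3/2 = +/-Zero, bits 1/0 = +/-Denormal; !sign selects the
 * positive-class bit of each pair.
 */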

void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    dcmx = DCMX(opcode);
    exp = (xb->VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb->VsrD(0));
    if (float64_is_any_nan(xb->VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb->VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb->VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb->VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb->VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}

void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, GETPC());
    *xt = t;
}
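
/*
 * With EX = 0 the inexact flag raised by the rounding itself is
 * suppressed; the EX = 1 form (xsrqpix) keeps it so that XX is set.
 */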

void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}

void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(xb->f128);
        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
            t.f128 = xb->f128;
        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            t.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}

void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa->f128) |
                                float128_classify(xb->f128));
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
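
/*
 * In the quad-precision helpers above, Rc(opcode) != 0 selects the
 * round-to-odd variants (xscvqpdpo, xssqrtqpo, xssubqpo) by overriding
 * the rounding mode in the scratch float_status.
 */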