/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
21 #include "exec/helper-proto.h"
22 #include "exec/exec-all.h"
24 #include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
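
/*
 * Illustrative example (not part of the original source): for float32,
 * OR-ing in 0x00400000 sets the most-significant fraction bit -- the
 * "quiet" bit -- so the sNaN 0x7f800001 becomes the qNaN 0x7fc00001.
 */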
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        /* Normalized operand, or Inf, or NaN. */
        ret  = (uint64_t)extract32(arg, 30, 2) << 62;
        ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
        ret |= (uint64_t)extract32(arg, 0, 30) << 29;
    } else {
        /* Zero or Denormalized operand. */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /* Denormalized operand. */
            int shift = clz32(abs_arg) - 9;
            int exp = -126 - shift + 1023;
            ret |= (uint64_t)exp << 52;
            /* The cast is required: shift + 29 can exceed 31. */
            ret |= (uint64_t)abs_arg << (shift + 29);
        }
    }
    return ret;
}
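
/*
 * Worked example (illustrative, not in the original): 1.0f is
 * 0x3f800000.  The top two bits contribute 0, the inverted exponent
 * MSB replicates to 0x7 << 59, and the low 30 bits shift left by 29,
 * yielding 0x3ff0000000000000, the float64 encoding of 1.0.
 */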
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN). */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result. */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
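
/*
 * Illustrative note (not in the original): for values representable in
 * both formats this inverts helper_todouble(), e.g. 0x3ff0000000000000
 * (1.0) maps back to 0x3f800000 (1.0f).
 */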
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
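
/*
 * Example (illustrative): float64 1.5 is 0x3ff8000000000000; its biased
 * exponent field is 0x3ff, so the unbiased exponent returned is 0.
 */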
/* Classify a floating-point number. */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~(0x1F << FPSCR_FPRF);
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
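
/*
 * Illustrative note (not in the original): the 5-bit FPRF values follow
 * the Power ISA C,<,>,=,? encoding, e.g. a positive normal number is
 * 0x04 ("greater than zero") and a qNaN is 0x11 (C plus "unordered").
 */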
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}
static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    }
}
/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXCVI;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = env_cpu(env);
    uint32_t prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
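
/*
 * Illustrative note (not in the original): each mask bit i selects the
 * 4-bit FPSCR field at bits 4*i+3..4*i, so a mask of 0x1 updates only
 * the low nibble, which holds the RN rounding-mode field.
 */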
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr, int classes)
{
    if ((classes & ~is_neg) == is_inf) {
        /* Magnitude subtraction of infinities */
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}

float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}

static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
        /* Multiplication of zero by infinity */
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float64_classify(arg1) |
                             float64_classify(arg2));
    }

    return ret;
}

static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    classes &= ~is_neg;
    if (classes == is_inf) {
        /* Division of infinity by infinity */
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (classes == is_zero) {
        /* Division of zero by zero */
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            float_invalid_op_div(env, 1, GETPC(),
                                 float64_classify(arg1) |
                                 float64_classify(arg2));
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}

static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int status = get_float_exception_flags(&env->fp_status);           \
                                                                       \
    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            ret = nanval;                                              \
        }                                                              \
        do_float_check_status(env, GETPC());                           \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
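
/*
 * Illustrative note (not in the original): nanval is the saturated
 * value substituted on an invalid conversion, e.g. fctiwz of a NaN or
 * of an out-of-range input yields 0x80000000.
 */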
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
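
/*
 * Example (illustrative): frin rounds ties away from zero, so
 * frin(2.5) gives 3.0 and frin(-2.5) gives -3.0, unlike the FPSCR
 * default round-to-nearest-even.
 */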
#define FPU_MADDSUB_UPDATE(NAME, TP)                              \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,     \
                 unsigned int madd_flags, uintptr_t retaddr)      \
{                                                                 \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||           \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||           \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {           \
        /* sNaN operation */                                      \
        float_invalid_op_vxsnan(env, retaddr);                    \
    }                                                             \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||         \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {         \
        /* Multiplication of zero by infinity */                  \
        float_invalid_op_vximz(env, 1, retaddr);                  \
    }                                                             \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&     \
        TP##_is_infinity(arg3)) {                                 \
        uint8_t aSign, bSign, cSign;                              \
                                                                  \
        aSign = TP##_is_neg(arg1);                                \
        bSign = TP##_is_neg(arg2);                                \
        cSign = TP##_is_neg(arg3);                                \
        if (madd_flags & float_muladd_negate_c) {                 \
            cSign ^= 1;                                           \
        }                                                         \
        if (aSign ^ bSign ^ cSign) {                              \
            float_invalid_op_vxisi(env, 1, retaddr);              \
        }                                                         \
    }                                                             \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
#define FPU_FMADD(op, madd_flags)                               \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,           \
                     uint64_t arg2, uint64_t arg3)              \
{                                                               \
    uint32_t flags;                                             \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,  \
                                 &env->fp_status);              \
    flags = get_float_exception_flags(&env->fp_status);         \
    if (flags) {                                                \
        if (flags & float_flag_invalid) {                       \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,  \
                                        madd_flags, GETPC());   \
        }                                                       \
        do_float_check_status(env, GETPC());                    \
    }                                                           \
    return ret;                                                 \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
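
/*
 * Illustrative note (not in the original): the flag sets map all four
 * mnemonics onto one fused-multiply-add primitive, e.g. fnmsub
 * computes -(arg1 * arg2 - arg3) by negating both the addend and the
 * final result.
 */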
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_vxsnan(env, GETPC());
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_vxsqrt(env, 1, GETPC());
        }
    }

    return ret;
}
helper_fre(CPUPPCState
*env
, float64 arg
)
959 /* "Estimate" the reciprocal with actual division. */
960 float64 ret
= float64_div(float64_one
, arg
, &env
->fp_status
);
961 int status
= get_float_exception_flags(&env
->fp_status
);
963 if (unlikely(status
)) {
964 if (status
& float_flag_invalid
) {
965 if (float64_is_signaling_nan(arg
, &env
->fp_status
)) {
966 /* sNaN reciprocal */
967 float_invalid_op_vxsnan(env
, GETPC());
970 if (status
& float_flag_divbyzero
) {
971 float_zero_divide_excp(env
, GETPC());
972 /* For FPSCR.ZE == 0, the result is 1/2. */
973 ret
= float64_set_sign(float64_half
, float64_is_neg(arg
));
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero. */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
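
/*
 * Illustrative example (not in the original): helper_fsel(env, a, b, c)
 * returns b when a is greater than or equal to zero (including -0.0)
 * and returns c when a is negative or a NaN.
 */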
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
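
/*
 * Illustrative note (not in the original): the result is a CR-style
 * nibble with bit 3 always set, fg_flag in bit 2 and fe_flag in bit 1,
 * so e.g. fe_flag alone yields 0xa and both flags yield 0xe.
 */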
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
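
/*
 * Illustrative note (not in the original): ret is the FPCC/CR pattern
 * 0x08 = less than, 0x04 = greater than, 0x02 = equal and
 * 0x01 = unordered (at least one operand is a NaN).
 */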
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
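
/*
 * Illustrative example (not in the original): the *sf/*uf variants
 * treat the 32-bit operand as a fixed-point fraction with 32 fraction
 * bits, so efscfsf(0x40000000) computes 2^30 / 2^32 = 0.25f.
 */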
#define HELPER_SPE_SINGLE_CONV(name)                        \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
    {                                                       \
        return e##name(env, val);                           \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                         \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
    {                                                        \
        return ((uint64_t)e##name(env, val >> 32) << 32) |   \
                (uint64_t)e##name(env, val);                 \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
                (uint64_t)e##name(env, op1, op2);                       \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
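
/*
 * Example (illustrative): with the high-element result t0 = 1 and the
 * low-element result t1 = 0 the merged nibble is 0b1010 -- high set,
 * low clear, "any" set, "all" clear.
 */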
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double precision floating point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x

/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
                                    tp##_classify(xa.fld) |                  \
                                    tp##_classify(xb.fld));                  \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa.f128) |
                                float128_classify(xb.f128));
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
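
/*
 * Illustrative note (assumption, not in the original): a nonzero
 * Rc(opcode) selects the round-to-odd form of the instruction
 * (xsaddqpo), which is why float_round_to_odd is forced here.
 */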
/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, sfprf, GETPC(),                        \
                                 tp##_classify(xa.fld) |                     \
                                 tp##_classify(xb.fld));                     \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float128_classify(xa.f128) |
                             float128_classify(xb.f128));
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_div(env, sfprf, GETPC(),                        \
                                 tp##_classify(xa.fld) |                     \
                                 tp##_classify(xb.fld));                     \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, 1, GETPC(),
                             float128_classify(xa.f128) |
                             float128_classify(xb.f128));
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}
/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
    helper_reset_fpstatus(env);                                         \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC());                      \
        }                                                               \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);           \
                                                                        \
        if (r2sp) {                                                     \
            xt.fld = helper_frsp(env, xt.fld);                          \
        }                                                               \
                                                                        \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.fld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    do_float_check_status(env, GETPC());                                \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
                     tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
                         tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld) &&                         \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb.fld) &&                         \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
    ppc_vsr_t *b, *c;                                                         \
    int i;                                                                    \
                                                                              \
    if (afrm) { /* AxB + T */                                                 \
        b = &xb;                                                              \
        c = &xt_in;                                                           \
    } else { /* AxT + B */                                                    \
        b = &xt_in;                                                           \
        c = &xb;                                                              \
    }                                                                         \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt_in, env);                                          \
                                                                              \
    xt_out = xt_in;                                                           \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /*                                                                \
             * Avoid double rounding errors by rounding the intermediate     \
             * result to odd.                                                 \
             */                                                               \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                     maddflgs, &tstat);                       \
            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
                           float_flag_inexact) != 0;                          \
        } else {                                                              \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                     maddflgs, &tstat);                       \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa.fld, b->fld,                     \
                                     c->fld, maddflgs, GETPC());              \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt_out.fld);                     \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt_out, env);                                         \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
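
/*
 * Illustrative note (not in the original): the r2sp path above
 * implements a sticky ("round to odd") scheme -- the fused operation
 * runs in round-to-zero and the inexact flag is OR-ed into the
 * result's least-significant bit, so the final rounding to single
 * precision cannot double-round.
 */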
2409 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2410 * op - instruction mnemonic
2411 * cmp - comparison operation
2412 * exp - expected result of comparison
2413 * svxvc - set VXVC bit
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_vxvc(env, 0, GETPC());                               \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    do_float_check_status(env, GETPC());                                      \
}
VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
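
/*
 * Note the reversed operand order in the macro body: xscmpgedp passes
 * "le" and tests float64_le(xb, xa) == 1, which is equivalent to
 * xa >= xb, and xscmpnedp tests float64_eq(xb, xa) == 0. This avoids
 * needing dedicated "ge"/"ne" softfloat predicates.
 */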
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_vxsnan(env, GETPC());                           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_vxvc(env, 0, GETPC());                          \
    }                                                                    \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
    do_float_check_status(env, GETPC());                                 \
}
VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)

#define VSX_SCALAR_CMPQ(op, ordered)                                    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    uint32_t cc = 0;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false;                        \
    helper_reset_fpstatus(env);                                         \
    getVSR(rA(opcode) + 32, &xa, env);                                  \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
        vxsnan_flag = true;                                             \
        cc = CRF_SO;                                                    \
        if (fpscr_ve == 0 && ordered) {                                 \
            vxvc_flag = true;                                           \
        }                                                               \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
        cc = CRF_SO;                                                    \
        if (ordered) {                                                  \
            vxvc_flag = true;                                           \
        }                                                               \
    }                                                                   \
    if (vxsnan_flag) {                                                  \
        float_invalid_op_vxsnan(env, GETPC());                          \
    }                                                                   \
    if (vxvc_flag) {                                                    \
        float_invalid_op_vxvc(env, 0, GETPC());                         \
    }                                                                   \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
        cc |= CRF_LT;                                                   \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
        cc |= CRF_GT;                                                   \
    } else {                                                            \
        cc |= CRF_EQ;                                                   \
    }                                                                   \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
    env->fpscr |= cc << FPSCR_FPRF;                                     \
    env->crf[BF(opcode)] = cc;                                          \
    do_float_check_status(env, GETPC());                                \
}
VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
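
/*
 * Only the ordered variants (xscmpodp/xscmpoqp) can raise VXVC: always
 * for a quiet NaN, and for a signaling NaN only when VE is clear (the
 * VXSNAN trap takes precedence otherwise). An unordered result is
 * reported as CRF_SO in the target CR field either way.
 */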
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name - instruction mnemonic
 *   op   - operation (max or min)
 *   nels - number of elements (1, 2 or 4)
 *   tp   - type (float32 or float64)
 *   fld  - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    do_float_check_status(env, GETPC());                                      \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
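
/*
 * Sketch of the resulting NaN behaviour: tp##_maxnum/minnum resolve to
 * softfloat's IEEE 754-2008 maxNum/minNum operations, which prefer the
 * numeric operand when exactly one input is a quiet NaN, e.g.
 * float64_maxnum(qnan, one, &s) yields one. Signaling NaNs still set
 * VXSNAN via the explicit check in the loop above.
 */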

#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
                 float64_is_any_nan(xb.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}                                                                             \

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);

#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
        if (max) {                                                            \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL;                                            \
            } else {                                                          \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            } else {                                                          \
                xt.VsrD(0) = 0ULL;                                            \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
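
/*
 * The two families differ in their NaN and zero handling: the "c"
 * (C-style) forms above fall back to operand B whenever either input is
 * a NaN, whereas the "j" (Java-style) forms propagate the first NaN
 * operand and additionally distinguish -0.0 from +0.0 when both inputs
 * are zeros.
 */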
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31 - 21)) & 1) {                                      \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    do_float_check_status(env, GETPC());                                  \
}
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
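
/*
 * The (opcode >> (31 - 21)) & 1 test above picks out what appears to be
 * the record bit of these vector compares (bit 21 in the ISA's
 * big-endian bit numbering): the record forms, e.g. xvcmpeqdp., then
 * summarise the per-element results in CR6 as "all true" (0x8) and/or
 * "all false" (0x2).
 */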
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    do_float_check_status(env, GETPC());                           \
}
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)

/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xb;                                                     \
    int i;                                                                \
                                                                          \
    getVSR(rB(opcode) + 32, &xb, env);                                    \
    getVSR(rD(opcode) + 32, &xt, env);                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);               \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                      \
                                            &env->fp_status))) {          \
            float_invalid_op_vxsnan(env, GETPC());                        \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                        \
        }                                                                 \
        if (sfprf) {                                                      \
            helper_compute_fprf_##ttp(env, xt.tfld);                      \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(rD(opcode) + 32, &xt, env);                                    \
    do_float_check_status(env, GETPC());                                  \
}
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf)   \
void helper_##op(CPUPPCState *env, uint32_t opcode)                  \
{                                                                    \
    ppc_vsr_t xt, xb;                                                \
    int i;                                                           \
                                                                     \
    getVSR(xB(opcode), &xb, env);                                    \
    memset(&xt, 0, sizeof(xt));                                      \
                                                                     \
    for (i = 0; i < nels; i++) {                                     \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);       \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                 \
                                            &env->fp_status))) {     \
            float_invalid_op_vxsnan(env, GETPC());                   \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                   \
        }                                                            \
        if (sfprf) {                                                 \
            helper_compute_fprf_##ttp(env, xt.tfld);                 \
        }                                                            \
    }                                                                \
                                                                     \
    putVSR(xT(opcode), &xt, env);                                    \
    do_float_check_status(env, GETPC());                             \
}
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
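
/*
 * The extra "1" argument threaded through stp##_to_##ttp above is the
 * ieee flag of softfloat's half-precision conversions, e.g.
 * float16_to_float32(a, true, &s): it selects IEEE 754 half precision
 * with infinities and NaNs rather than the alternative ARM-style
 * format.
 */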
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}

/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld));     \
            xt.tfld = rnan;                                                  \
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, GETPC());                                     \
}
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
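
/*
 * On an invalid conversion (a NaN input or an out-of-range value), the
 * rnan argument doubles as the forced result: e.g. xscvdpsxds stores
 * 0x8000000000000000 (INT64_MIN), while the unsigned variants store 0.
 * float_invalid_cvt() raises VXCVI (plus VXSNAN for a signaling NaN
 * input) beforehand.
 */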

/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status);      \
    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld));         \
        xt.tfld = rnan;                                                      \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    do_float_check_status(env, GETPC());                                     \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)

/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   jdef  - definition of the j index (i or 2*i)
 *   sfprf - set FPRF
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.tfld);                  \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    do_float_check_status(env, GETPC());                                \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
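
/*
 * Sketch of the r2sp path: xscvsxdsp first converts int64 to float64
 * and then rounds the result to single precision via helper_frsp(),
 * the same round-for-single-precision pattern the *sp arithmetic
 * helpers in this file use; the value stays in the float64 field in
 * SP-rounded form.
 */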

/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op   - instruction mnemonic
 *   stp  - source type (int32, uint32, int64 or uint64)
 *   ttp  - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)      \
void helper_##op(CPUPPCState *env, uint32_t opcode)             \
{                                                               \
    ppc_vsr_t xt, xb;                                           \
                                                                \
    getVSR(rB(opcode) + 32, &xb, env);                          \
    getVSR(rD(opcode) + 32, &xt, env);                          \
                                                                \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);         \
    helper_compute_fprf_##ttp(env, xt.tfld);                    \
                                                                \
    putVSR(xT(opcode) + 32, &xt, env);                          \
    do_float_check_status(env, GETPC());                        \
}
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
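
/*
 * Quick sanity check of the sentinel (a sketch, assuming the usual
 * softfloat enum values float_round_nearest_even == 0, float_round_down
 * == 1, float_round_up == 2 and float_round_to_zero == 3): the sum is 6,
 * which collides with none of the real rounding modes, including
 * float_round_ties_away and float_round_to_odd.
 */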
/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
                                                                       \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR.                                                \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    do_float_check_status(env, GETPC());                               \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)

uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, GETPC());
    return xt;
}

#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xt, xa, pcv, xto;                                       \
    int i, idx;                                                       \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xT(opcode), &xt, env);                                     \
    getVSR(xB(opcode), &pcv, env);                                    \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv.VsrB(i) & 0x1F;                                     \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    }                                                                 \
    putVSR(xT(opcode), &xto, env);                                    \
}
VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
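
/*
 * Worked example (sketch): for a permute control vector byte of 0x03,
 * xxperm selects xa.VsrB(3); xxpermr (indexed == 1) first maps the
 * index to 31 - 3 = 28 and so selects xt.VsrB(12), giving the
 * right-to-left byte ordering of the "r" form.
 */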

void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}

/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t xt, xb;                                             \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    getVSR(xbn, &xb, env);                                        \
    if (!scrf) {                                                  \
        memset(&xt, 0, sizeof(xt));                               \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb.fld);                               \
        if (tp##_is_any_nan(xb.fld)) {                            \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb.fld)) {                    \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb.fld)) {                        \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {            \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                  \
            env->fpscr |= cc << FPSCR_FPRF;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            xt.tfld = match ? fld_max : 0;                        \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        putVSR(xT(opcode), &xt, env);                             \
    }                                                             \
}
VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
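
/*
 * DCMX bit usage implied by the extract32() calls above (sketch):
 * bit 6 matches any NaN, bits 5/4 match +/- infinity, bits 3/2 match
 * +/- zero and bits 1/0 match +/- denormal, with the "+" bit selected
 * when sign is clear.
 */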

void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}

void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    do_float_check_status(env, GETPC());
    putVSR(rD(opcode) + 32, &xt, env);
}

void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa.f128) |
                                float128_classify(xb.f128));
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}