/*
 * PowerPC floating point and SPE emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "helper.h"

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

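/*
 * helper_compute_fprf() classifies a float64 into the 5-bit FPRF code
 * (C, FL, FG, FE, FU).  The constants below follow the PowerPC FPRF
 * encoding: 0x11 quiet NaN, 0x09/0x05 -/+ infinity, 0x12/0x02 -/+ zero,
 * 0x18/0x14 -/+ denormal, 0x08/0x04 -/+ normal.
 */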
uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg) {
            ret = 0x09;
        } else {
            ret = 0x05;
        }
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg) {
                ret = 0x12;
            } else {
                ret = 0x02;
            }
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}

/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | op);
        }
    }
    return ret;
}

static inline void float_zero_divide_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

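/*
 * Each bit of 'mask' selects one 4-bit nibble of the FPSCR: bit i set
 * means nibble i (bits 4*i .. 4*i+3) is copied from the source value.
 * FEX and VX (the 0x60000000 bits) are summary bits and are never
 * written directly; they are recomputed from the individual flags.
 */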
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    /*
     * We only use the 32 least-significant bits of the incoming FPR
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}

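/*
 * Translate the softfloat status flags accumulated since the last
 * helper_reset_fpstatus() into the corresponding FPSCR updates and,
 * if an enabled exception is pending, raise the deferred program
 * interrupt.
 */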
void helper_float_check_status(CPUPPCState *env)
{
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, env->exception_index,
                                       env->error_code);
        }
    }
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

/* fadd - fadd. */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif

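/*
 * Common helper for the frin/friz/frip/frim round-to-integer
 * instructions: temporarily force the requested rounding mode, round,
 * then restore the mode selected by FPSCR[RN].
 */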
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);
    }
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_nearest_even);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

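/*
 * The multiply-add helpers below compute the product in 128-bit
 * precision so that the final result is rounded only once, matching
 * the fused multiply-add semantics required by the PowerPC
 * specification.
 */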
/* fmadd - fmadd. */
uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

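/*
 * fsel returns arg2 when arg1 is greater than or equal to zero and is
 * not a NaN, otherwise arg3.  Note that -0.0 counts as zero here.
 */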
/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

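/*
 * fcmpu/fcmpo produce a 4-bit condition code: 0x08 less than, 0x04
 * greater than, 0x02 equal, 0x01 unordered (at least one NaN).  The
 * same value is written to FPSCR[FPCC] and to the target CR field.
 */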
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC);
        }
    }
}

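/*
 * The SPE helpers below use env->vec_status rather than env->fp_status,
 * keeping their exception flags separate from those accumulated by the
 * classic FPU helpers above.
 */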
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

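/*
 * The *sf/*uf variants convert to and from 32-bit fixed-point
 * fractions: the value is scaled by 2^32, which is why the code below
 * divides or multiplies by (1ULL << 32) converted to float.
 */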
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                                    \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)             \
    {                                                                   \
        return e##name(env, val);                                       \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                                    \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)            \
    {                                                                   \
        return ((uint64_t)e##name(env, val >> 32) << 32) |              \
            (uint64_t)e##name(env, val);                                \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2) << 2;                             \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

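/*
 * Merge the comparison results of the upper (t0) and lower (t1) halves
 * of an SPE vector into one CR field: bit 3 = upper, bit 2 = lower,
 * bit 1 = either, bit 0 = both.
 */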
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}