/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
static float_status neon_float_status;
#define NFS &neon_float_status
/* Helper routines to perform bitwise copies between float and int.  */
static inline float32 vfp_itos(uint32_t i)
{
    union { uint32_t i; float32 s; } v;
    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union { uint32_t i; float32 s; } v;
    v.s = s;
    return v.i;
}
#define NEON_TYPE1(name, type) \
typedef struct { type v1; } neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct { type v2; type v1; } neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct { type v4; type v3; type v2; type v1; } neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct { type v1; type v2; } neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct { type v1; type v2; type v3; type v4; } neon_##name;
#endif
NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { vtype v; uint32_t i; } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { vtype v; uint32_t i; } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)
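
/* Illustrative sketch (not from the original file): NEON_UNPACK and
 * NEON_PACK only reinterpret the same 32 bits through a union, so on a
 * little-endian host unpacking 0x04030201 as neon_u8 yields v1=0x01 ...
 * v4=0x04, and packing the struct back returns the value unchanged.
 */
static inline uint32_t neon_pack_roundtrip_demo(uint32_t val)
{
    neon_u8 vec;
    uint32_t res;
    NEON_UNPACK(neon_u8, vec, val);   /* split into four byte lanes */
    NEON_PACK(neon_u8, res, vec);     /* reassemble the same bits */
    return res;                       /* always equal to val */
}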
#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1, vsrc2, vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}
#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
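
/* Illustrative sketch (not from the original file): a hand-expanded
 * equivalent of what NEON_VOP(hadd_u8, neon_u8, 4) generates, assuming
 * NEON_FN(dest, src1, src2) is dest = (src1 + src2) >> 1.  Each of the
 * four byte lanes of the packed 32-bit arguments is processed
 * independently.
 */
static inline uint32_t neon_vop_expansion_demo(uint32_t arg1, uint32_t arg2)
{
    uint32_t res;
    neon_u8 vsrc1, vsrc2, vdest;
    NEON_UNPACK(neon_u8, vsrc1, arg1);
    NEON_UNPACK(neon_u8, vsrc2, arg2);
    vdest.v1 = (vsrc1.v1 + vsrc2.v1) >> 1;   /* NEON_FN on lane 1 */
    vdest.v2 = (vsrc1.v2 + vsrc2.v2) >> 1;
    vdest.v3 = (vsrc1.v3 + vsrc2.v3) >> 1;
    vdest.v4 = (vsrc1.v4 + vsrc2.v4) >> 1;
    NEON_PACK(neon_u8, res, vdest);
    return res;
}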
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);
#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1, vsrc2, vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}
/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}
uint64_t HELPER(neon_qadd_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        SET_QC();
        res = ~(uint64_t)0;
    }
    return res;
}
#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint64_t HELPER(neon_qadd_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
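
/* Illustrative sketch (not from the original file): the signed overflow
 * test used by the saturating helpers above.  Overflow occurred iff both
 * operands have the same sign and the result's sign differs, e.g.
 * 0x7fffffff + 1 flips the sign bit, so the helper saturates to
 * 0x7fffffff and sets QC.
 */
static inline int signed_add_overflows_demo(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    return ((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT);
}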
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        SET_QC();
        res = 0;
    }
    return res;
}
uint64_t HELPER(neon_qsub_u64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        SET_QC();
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}
#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint64_t HELPER(neon_qsub_s64)(CPUState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN
int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}
uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}
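
/* Illustrative sketch (not from the original file): why the 32-bit
 * halving helpers add the shifted halves plus a carry instead of
 * computing (src1 + src2) >> 1 directly.  The full sum can overflow 32
 * bits, while halving first and adding back the carry-out of the two
 * low bits gives the same result safely.
 */
static inline uint32_t hadd_u32_demo(uint32_t a, uint32_t b)
{
    /* Equivalent to (uint32_t)(((uint64_t)a + b) >> 1). */
    return (a >> 1) + (b >> 1) + (a & b & 1);
}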
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN
int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}
uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN
int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}
uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN
uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64 || shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN
uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
        dest++; \
        dest >>= 1; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bits accumulator.  */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}
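
/* Illustrative sketch (not from the original file): a rounding right
 * shift by n adds 2^(n-1) before shifting, so rshl(5, -1) is
 * (5 + 1) >> 1 = 3 where the plain shift helpers give 5 >> 1 = 2.  The
 * 64-bit accumulator keeps the added rounding constant from wrapping
 * for values near INT32_MAX.
 */
static inline int32_t rounding_shift_demo(int32_t val, int n)
{
    /* assumes 0 < n < 32 */
    return (int32_t)(((int64_t)val + (1 << (n - 1))) >> n);
}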
/* Handling addition overflow with 64 bits inputs values is more
 * tricky than with 32 bits values.  */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift < -64) {
        val = 0;
    } else if (shift == -63) {
        val >>= 62;
        val++;
        val >>= 1;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000LL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bits accumulator.  */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}
/* Handling addition overflow with 64 bits inputs values is more
 * tricky than with 32 bits values.  */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
        val = 0;
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit.  */
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN
uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}
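
/* Illustrative sketch (not from the original file): the shift-back test
 * the saturating helpers use to detect overflow.  Shifting the result
 * back recovers the operand only if no set bits were shifted out, e.g.
 * (uint8_t)(0x40 << 2) == 0x00 and 0x00 >> 2 != 0x40, so VQSHL.U8
 * saturates to 0xff; the qc flag stands in for SET_QC().
 */
static inline uint8_t qshl_u8_demo(uint8_t val, int shift, int *qc)
{
    /* assumes 0 <= shift < 8 */
    uint8_t dest = val << shift;
    if ((dest >> shift) != val) {
        *qc = 1;
        dest = 0xff;
    }
    return dest;
}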
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN
uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN
uint32_t HELPER(neon_qshlu_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u32(env, valop, shiftop);
}
uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u64(env, valop, shiftop);
}
/* FIXME: This is wrong.  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bits accumulator.  */
uint32_t HELPER(neon_qrshl_u32)(CPUState *env, uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = ~0;
        }
    }
    return dest;
}
/* Handling addition overflow with 64 bits inputs values is more
 * tricky than with 32 bits values.  */
uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bits accumulator.  */
uint32_t HELPER(neon_qrshl_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        }
    }
    return dest;
}
/* Handling addition overflow with 64 bits inputs values is more
 * tricky than with 32 bits values.  */
uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}
uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}
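
/* Illustrative check (not from the original file): the mask trick above
 * adds all lanes in one 32-bit addition without letting carries cross
 * lane boundaries.  The top bit of each lane is cleared in both
 * operands before the add and restored afterwards by XOR; this helper
 * verifies that against a lane-by-lane reference (it always returns 1).
 */
static inline int swar_add16_check_demo(uint32_t a, uint32_t b)
{
    uint16_t lo = (uint16_t)a + (uint16_t)b;
    uint16_t hi = (uint16_t)(a >> 16) + (uint16_t)(b >> 16);
    return helper_neon_add_u16(a, b) == (lo | ((uint32_t)hi << 16));
}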
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN
/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
    uint32_t result = 0;
    uint32_t mask;
    int i;

    for (i = 0; i < 8; i++) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}
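
/* Illustrative sketch (not from the original file): scalar carry-less
 * multiply of two single bytes, the per-lane operation the loop above
 * performs on four lanes at once.  Over GF(2), 0x03 * 0x03 is
 * (x+1)*(x+1) = x^2+1 = 0x05, where integer multiplication gives 9.
 */
static inline uint16_t poly_mul8_demo(uint8_t a, uint8_t b)
{
    uint16_t result = 0;
    int i;
    for (i = 0; i < 8; i++) {
        if (a & (1 << i)) {
            result ^= (uint16_t)b << i;  /* XOR in the partial product */
        }
    }
    return result;
}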
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
{
    uint64_t result = 0;
    uint64_t mask;
    uint64_t op2ex = op2;
    op2ex = (op2ex & 0xff) |
        ((op2ex & 0xff00) << 8) |
        ((op2ex & 0xff0000) << 16) |
        ((op2ex & 0xff000000) << 24);
    while (op1) {
        mask = 0;
        if (op1 & 1) {
            mask |= 0xffff;
        }
        if (op1 & (1 << 8)) {
            mask |= (0xffffU << 16);
        }
        if (op1 & (1 << 16)) {
            mask |= (0xffffULL << 32);
        }
        if (op1 & (1 << 24)) {
            mask |= (0xffffULL << 48);
        }
        result ^= op2ex & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2ex <<= 1;
    }
    return result;
}
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN
/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}
#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN
uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}
/* Bit count.  */
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
    return x;
}
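
/* Illustrative check (not from the original file): the helper above is
 * the classic SWAR population count, stopped after the 4-bit stage so
 * each byte lane holds its own bit count; 0xff has 8 bits set and 0x03
 * has 2, so the check below always returns 1.
 */
static inline int cnt_u8_demo(void)
{
    return helper_neon_cnt_u8(0xff000003u) == 0x08000002u;
}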
#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16
#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
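
/* Illustrative sketch (not from the original file): scalar Q15 doubling
 * saturating multiply-high, the per-lane operation of NEON_QDMULH16
 * with round = 0.  E.g. 0x4000 * 0x4000 (0.5 * 0.5) gives 0x2000
 * (0.25), and 0x8000 * 0x8000 (-1 * -1) saturates to 0x7fff; the qc
 * flag stands in for SET_QC().
 */
static inline int16_t qdmulh16_demo(int16_t a, int16_t b, int *qc)
{
    int32_t tmp = (int32_t)a * b;
    if (tmp == 0x40000000) {    /* only -1 * -1 overflows the doubling */
        *qc = 1;
        return 0x7fff;
    }
    return (int16_t)((tmp << 1) >> 16);
}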
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}
uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}
uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
           | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}
uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
           | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}
uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
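
/* Illustrative sketch (not from the original file): rounded narrowing
 * of a single 32-bit lane to its high 16 bits, as done on two lanes at
 * once above.  Adding 0x8000, half the weight of the discarded bits,
 * implements round-to-nearest: 0x00018000 -> 2 while 0x00017fff -> 1.
 */
static inline uint16_t narrow_round_high_demo(uint32_t x)
{
    return (uint16_t)((x + 0x8000u) >> 16);
}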
uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }
    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);
    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);
    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}
uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}
uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}
uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}
uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}
uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}
uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}
uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}
uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}
uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}
uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}
uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}
uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}
uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}
uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}
uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}
uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}
#define DO_ABD(dest, x, y, type) do { \
    type tmp_x = (type)(x); \
    type tmp_y = (type)(y); \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint8_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int8_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint16_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t);
    return result | (tmp << 32);
}
uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int16_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t);
    return result | (tmp << 32);
}
uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t);
    return result;
}
uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t);
    return result;
}
#undef DO_ABD
/* Widening multiply. Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = (type1)(x); \
    type1 tmp_y = (type1)(y); \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}
uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}
#undef DO_MULL
uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}
uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}
/* FIXME: There should be a native op for this.  */
uint64_t HELPER(neon_negl_u64)(uint64_t x)
{
    return -x;
}
/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QABS8
#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QNEG8
#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QABS16
#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QNEG16
uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}
uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}
/* NEON Float helpers.  */
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = vfp_itos(a);
    float32 f1 = vfp_itos(b);
    return (float32_compare_quiet(f0, f1, NFS) == -1) ? a : b;
}
uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = vfp_itos(a);
    float32 f1 = vfp_itos(b);
    return (float32_compare_quiet(f0, f1, NFS) == 1) ? a : b;
}
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = vfp_itos(a);
    float32 f1 = vfp_itos(b);
    return vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
                    ? float32_sub(f0, f1, NFS)
                    : float32_sub(f1, f0, NFS));
}
uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
{
    return vfp_stoi(float32_add(vfp_itos(a), vfp_itos(b), NFS));
}
uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
{
    return vfp_stoi(float32_sub(vfp_itos(a), vfp_itos(b), NFS));
}
uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
{
    return vfp_stoi(float32_mul(vfp_itos(a), vfp_itos(b), NFS));
}
/* Floating point comparisons produce an integer result.  */
#define NEON_VOP_FCMP(name, cmp) \
uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
{ \
    if (float32_compare_quiet(vfp_itos(a), vfp_itos(b), NFS) cmp 0) \
        return ~0; \
    else \
        return 0; \
}

NEON_VOP_FCMP(ceq_f32, ==)
NEON_VOP_FCMP(cge_f32, >=)
NEON_VOP_FCMP(cgt_f32, >)
uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = float32_abs(vfp_itos(a));
    float32 f1 = float32_abs(vfp_itos(b));
    return (float32_compare_quiet(f0, f1, NFS) >= 0) ? ~0 : 0;
}
uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = float32_abs(vfp_itos(a));
    float32 f1 = float32_abs(vfp_itos(b));
    return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
}
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
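
/* Illustrative sketch (not from the original file): ELEM extracts lane
 * N of width SIZE bits, e.g. ELEM(0x12345678, 1, 16) == 0x1234 and
 * ELEM(0x12345678, 1, 8) == 0x56.
 */
static inline uint64_t elem_demo(void)
{
    return ELEM(0x12345678, 1, 8);  /* lane 1 of four byte lanes: 0x56 */
}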
void HELPER(neon_qunzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qunzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qunzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_unzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_unzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_qzip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qzip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qzip32)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_zip8)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_zip16)(CPUState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}