/*
 * ARM AdvSIMD / SVE Vector Helpers
 *
 * Copyright (c) 2020 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #ifndef TARGET_ARM_VEC_INTERNAL_H
21 #define TARGET_ARM_VEC_INTERNAL_H
24 * Note that vector data is stored in host-endian 64-bit chunks,
25 * so addressing units smaller than that needs a host-endian fixup.
27 * The H<N> macros are used when indexing an array of elements of size N.
29 * The H1_<N> macros are used when performing byte arithmetic and then
30 * casting the final pointer to a type of size N.
33 #define H1(x) ((x) ^ 7)
34 #define H1_2(x) ((x) ^ 6)
35 #define H1_4(x) ((x) ^ 4)
36 #define H2(x) ((x) ^ 3)
37 #define H4(x) ((x) ^ 1)
46 * Access to 64-bit elements isn't host-endian dependent; we provide H8
47 * and H1_8 so that when a function is being generated from a macro we
48 * can pass these rather than an empty macro argument, for clarity.
/*
 * Expand active predicate bits to bytes, for byte elements.
 */
extern const uint64_t expand_pred_b_data[256];

/*
 * expand_pred_b: look up the 64-bit mask in which each predicate bit of
 * @byte has been replicated into a full byte (bit set -> 0xff, clear -> 0x00).
 */
static inline uint64_t expand_pred_b(uint8_t byte)
{
    return expand_pred_b_data[byte];
}
/* Similarly for half-word elements. */
extern const uint64_t expand_pred_h_data[0x55 + 1];

/*
 * expand_pred_h: expand predicate bits to half-word masks.  Only the
 * even-numbered predicate bits are significant for half-word elements,
 * hence the 0x55 mask, which also bounds the table size (0x55 + 1).
 */
static inline uint64_t expand_pred_h(uint8_t byte)
{
    return expand_pred_h_data[byte & 0x55];
}
/*
 * clear_tail: zero the unused tail of a vector destination.
 *
 * @vd: vector destination, stored as host-endian 64-bit chunks
 * @opr_sz: number of bytes produced by the operation
 * @max_sz: total size in bytes of the vector register
 *
 * Zeroes bytes [opr_sz, max_sz) in 64-bit units.  The 8-byte stride
 * implies both sizes are multiples of 8 -- NOTE(review): assumed from
 * the stride; confirm with callers.
 */
static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
{
    /* Byte arithmetic via char * keeps this standard C (no void * math). */
    uint64_t *d = (uint64_t *)((char *)vd + opr_sz);
    uintptr_t i;

    for (i = opr_sz; i < max_sz; i += 8) {
        *d++ = 0;
    }
}
/*
 * do_sqrshl_bhs: signed saturating (rounding) shift, 8/16/32-bit elements.
 *
 * @src: value to shift, sign-extended into int32_t
 * @shift: shift count; negative counts shift right
 * @bits: element width (8, 16 or 32)
 * @round: true to round-to-nearest-up on right shifts
 * @sat: if non-NULL, set to 1 when the result saturates; if NULL,
 *       out-of-range results wrap/truncate instead of saturating
 */
static inline int32_t do_sqrshl_bhs(int32_t src, int32_t shift, int bits,
                                    bool round, uint32_t *sat)
{
    if (shift <= -bits) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 31;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    } else if (shift < bits) {
        int32_t val = src << shift;
        if (bits == 32) {
            /* At full width, detect overflow by shifting back. */
            if (!sat || val >> shift == src) {
                return val;
            }
        } else {
            /* Narrow elements: overflow iff sign-extension changes val. */
            int32_t extval = sextract32(val, 0, bits);
            if (!sat || val == extval) {
                return extval;
            }
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    /* Saturate to INT<bits>_MAX for non-negative src, INT<bits>_MIN else. */
    return (1u << (bits - 1)) - (src >= 0);
}
/*
 * do_uqrshl_bhs: unsigned saturating (rounding) shift, 8/16/32-bit elements.
 *
 * @src: value to shift, zero-extended into uint32_t
 * @shift: shift count; negative counts shift right
 * @bits: element width (8, 16 or 32)
 * @round: true to round-to-nearest-up on right shifts
 * @sat: if non-NULL, set to 1 when the result saturates; if NULL,
 *       out-of-range results wrap/truncate instead of saturating
 */
static inline uint32_t do_uqrshl_bhs(uint32_t src, int32_t shift, int bits,
                                     bool round, uint32_t *sat)
{
    /* With rounding, a right shift of exactly @bits can still carry in. */
    if (shift <= -(bits + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    } else if (shift < bits) {
        uint32_t val = src << shift;
        if (bits == 32) {
            /* At full width, detect overflow by shifting back. */
            if (!sat || val >> shift == src) {
                return val;
            }
        } else {
            /* Narrow elements: overflow iff high bits are non-zero. */
            uint32_t extval = extract32(val, 0, bits);
            if (!sat || val == extval) {
                return extval;
            }
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    /* Saturate to UINT<bits>_MAX. */
    return MAKE_64BIT_MASK(0, bits);
}
/*
 * do_suqrshl_bhs: unsigned saturating (rounding) shift of a *signed*
 * input, 8/16/32-bit elements.  A negative @src saturates to 0 (when
 * @sat is non-NULL); otherwise the operation is do_uqrshl_bhs.
 */
static inline int32_t do_suqrshl_bhs(int32_t src, int32_t shift, int bits,
                                     bool round, uint32_t *sat)
{
    if (sat && src < 0) {
        *sat = 1;
        return 0;
    }
    return do_uqrshl_bhs(src, shift, bits, round, sat);
}
/*
 * do_sqrshl_d: signed saturating (rounding) shift, 64-bit elements.
 *
 * @src: value to shift
 * @shift: shift count; negative counts shift right
 * @round: true to round-to-nearest-up on right shifts
 * @sat: if non-NULL, set to 1 when the result saturates; if NULL,
 *       out-of-range results wrap instead of saturating
 */
static inline int64_t do_sqrshl_d(int64_t src, int64_t shift,
                                  bool round, uint32_t *sat)
{
    if (shift <= -64) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    } else if (shift < 64) {
        int64_t val = src << shift;
        /* Detect overflow by shifting back and comparing. */
        if (!sat || val >> shift == src) {
            return val;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return src < 0 ? INT64_MIN : INT64_MAX;
}
/*
 * do_uqrshl_d: unsigned saturating (rounding) shift, 64-bit elements.
 *
 * @src: value to shift
 * @shift: shift count; negative counts shift right
 * @round: true to round-to-nearest-up on right shifts
 * @sat: if non-NULL, set to 1 when the result saturates; if NULL,
 *       out-of-range results wrap instead of saturating
 */
static inline uint64_t do_uqrshl_d(uint64_t src, int64_t shift,
                                   bool round, uint32_t *sat)
{
    /* With rounding, a right shift of exactly 64 can still carry in. */
    if (shift <= -(64 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    } else if (shift < 64) {
        uint64_t val = src << shift;
        /* Detect overflow by shifting back and comparing. */
        if (!sat || val >> shift == src) {
            return val;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return UINT64_MAX;
}
/*
 * do_suqrshl_d: unsigned saturating (rounding) shift of a *signed*
 * input, 64-bit elements.  A negative @src saturates to 0 (when @sat
 * is non-NULL); otherwise the operation is do_uqrshl_d.
 */
static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
                                   bool round, uint32_t *sat)
{
    if (sat && src < 0) {
        *sat = 1;
        return 0;
    }
    return do_uqrshl_d(src, shift, round, sat);
}
/*
 * do_sqrdmlah_{b,h,s,d}: per-element-size signed saturating rounding
 * doubling multiply-accumulate helpers.
 * NOTE(review): the bool parameters and the uint32_t * cumulative-
 * saturation pointer semantics are inferred from the signatures --
 * confirm against the definitions elsewhere in the target/arm code.
 */
int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool);
int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);

/*
 * 16 x 16 -> 32 vector polynomial multiply where the inputs are
 * in the low 16 bits of each 32-bit element
 */
uint64_t pmull_w(uint64_t op1, uint64_t op2);
231 * @e1, @e2: multiplicand vectors
233 * BFloat16 2-way dot product of @e1 & @e2, accumulating with @sum.
234 * The @e1 and @e2 operands correspond to the 32-bit source vector
235 * slots and contain two Bfloat16 values each.
237 * Corresponds to the ARM pseudocode function BFDotAdd.
239 float32
bfdotadd(float32 sum
, uint32_t e1
, uint32_t e2
);
241 #endif /* TARGET_ARM_VEC_INTERNAL_H */