1 /*
2 * ARM SVE Operations
3 *
4 * Copyright (c) 2018 Linaro, Ltd.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "internals.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/helper-proto.h"
26 #include "tcg/tcg-gvec-desc.h"
27 #include "fpu/softfloat.h"
28 #include "tcg/tcg.h"
29 #include "vec_internal.h"
30
31
32 /* Note that vector data is stored in host-endian 64-bit chunks,
33    so addressing units smaller than that need a host-endian fixup. */
34 #ifdef HOST_WORDS_BIGENDIAN
35 #define H1(x) ((x) ^ 7)
36 #define H1_2(x) ((x) ^ 6)
37 #define H1_4(x) ((x) ^ 4)
38 #define H2(x) ((x) ^ 3)
39 #define H4(x) ((x) ^ 1)
40 #else
41 #define H1(x) (x)
42 #define H1_2(x) (x)
43 #define H1_4(x) (x)
44 #define H2(x) (x)
45 #define H4(x) (x)
46 #endif
47
48 /* Return a value for NZCV as per the ARM PredTest pseudofunction.
49 *
50 * The return value has bit 31 set if N is set, bit 1 set if Z is clear,
51 * and bit 0 set if C is set. Compare the definitions of these variables
52 * within CPUARMState.
53 */
54
55 /* For no G bits set, NZCV = C. */
56 #define PREDTEST_INIT 1
57
58 /* This is an iterative function, called for each Pd and Pg word
59 * moving forward.
60 */
61 static uint32_t iter_predtest_fwd(uint64_t d, uint64_t g, uint32_t flags)
62 {
63 if (likely(g)) {
64 /* Compute N from first D & G.
65 Use bit 2 to signal first G bit seen. */
66 if (!(flags & 4)) {
67 flags |= ((d & (g & -g)) != 0) << 31;
68 flags |= 4;
69 }
70
71 /* Accumulate Z from each D & G. */
72 flags |= ((d & g) != 0) << 1;
73
74 /* Compute C from last !(D & G). Replace previous. */
75 flags = deposit32(flags, 0, 1, (d & pow2floor(g)) == 0);
76 }
77 return flags;
78 }
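
/*
 * Worked example: with d == 0x01, g == 0x0f and flags == PREDTEST_INIT,
 * the first active element (bit 0) is set, so N is set; at least one
 * active element is set, so Z is clear; the last active element
 * (bit 3) is clear, so C is set.  The returned value is 0x80000007,
 * including the internal bit-2 "first G bit seen" marker.
 */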
79
80 /* This is an iterative function, called for each Pd and Pg word
81 * moving backward.
82 */
83 static uint32_t iter_predtest_bwd(uint64_t d, uint64_t g, uint32_t flags)
84 {
85 if (likely(g)) {
86         /* Compute C from first (i.e. last) !(D & G).
87 Use bit 2 to signal first G bit seen. */
88 if (!(flags & 4)) {
89 flags += 4 - 1; /* add bit 2, subtract C from PREDTEST_INIT */
90 flags |= (d & pow2floor(g)) == 0;
91 }
92
93 /* Accumulate Z from each D & G. */
94 flags |= ((d & g) != 0) << 1;
95
96         /* Compute N from last (i.e. first) D & G.  Replace previous.  */
97 flags = deposit32(flags, 31, 1, (d & (g & -g)) != 0);
98 }
99 return flags;
100 }
101
102 /* The same for a single word predicate. */
103 uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g)
104 {
105 return iter_predtest_fwd(d, g, PREDTEST_INIT);
106 }
107
108 /* The same for a multi-word predicate. */
109 uint32_t HELPER(sve_predtest)(void *vd, void *vg, uint32_t words)
110 {
111 uint32_t flags = PREDTEST_INIT;
112 uint64_t *d = vd, *g = vg;
113 uintptr_t i = 0;
114
115 do {
116 flags = iter_predtest_fwd(d[i], g[i], flags);
117 } while (++i < words);
118
119 return flags;
120 }
121
122 /* Expand active predicate bits to bytes, for byte elements.
123 * for (i = 0; i < 256; ++i) {
124 * unsigned long m = 0;
125 * for (j = 0; j < 8; j++) {
126 * if ((i >> j) & 1) {
127 * m |= 0xfful << (j << 3);
128 * }
129 * }
130 * printf("0x%016lx,\n", m);
131 * }
132 */
133 static inline uint64_t expand_pred_b(uint8_t byte)
134 {
135 static const uint64_t word[256] = {
136 0x0000000000000000, 0x00000000000000ff, 0x000000000000ff00,
137 0x000000000000ffff, 0x0000000000ff0000, 0x0000000000ff00ff,
138 0x0000000000ffff00, 0x0000000000ffffff, 0x00000000ff000000,
139 0x00000000ff0000ff, 0x00000000ff00ff00, 0x00000000ff00ffff,
140 0x00000000ffff0000, 0x00000000ffff00ff, 0x00000000ffffff00,
141 0x00000000ffffffff, 0x000000ff00000000, 0x000000ff000000ff,
142 0x000000ff0000ff00, 0x000000ff0000ffff, 0x000000ff00ff0000,
143 0x000000ff00ff00ff, 0x000000ff00ffff00, 0x000000ff00ffffff,
144 0x000000ffff000000, 0x000000ffff0000ff, 0x000000ffff00ff00,
145 0x000000ffff00ffff, 0x000000ffffff0000, 0x000000ffffff00ff,
146 0x000000ffffffff00, 0x000000ffffffffff, 0x0000ff0000000000,
147 0x0000ff00000000ff, 0x0000ff000000ff00, 0x0000ff000000ffff,
148 0x0000ff0000ff0000, 0x0000ff0000ff00ff, 0x0000ff0000ffff00,
149 0x0000ff0000ffffff, 0x0000ff00ff000000, 0x0000ff00ff0000ff,
150 0x0000ff00ff00ff00, 0x0000ff00ff00ffff, 0x0000ff00ffff0000,
151 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0x0000ff00ffffffff,
152 0x0000ffff00000000, 0x0000ffff000000ff, 0x0000ffff0000ff00,
153 0x0000ffff0000ffff, 0x0000ffff00ff0000, 0x0000ffff00ff00ff,
154 0x0000ffff00ffff00, 0x0000ffff00ffffff, 0x0000ffffff000000,
155 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0x0000ffffff00ffff,
156 0x0000ffffffff0000, 0x0000ffffffff00ff, 0x0000ffffffffff00,
157 0x0000ffffffffffff, 0x00ff000000000000, 0x00ff0000000000ff,
158 0x00ff00000000ff00, 0x00ff00000000ffff, 0x00ff000000ff0000,
159 0x00ff000000ff00ff, 0x00ff000000ffff00, 0x00ff000000ffffff,
160 0x00ff0000ff000000, 0x00ff0000ff0000ff, 0x00ff0000ff00ff00,
161 0x00ff0000ff00ffff, 0x00ff0000ffff0000, 0x00ff0000ffff00ff,
162 0x00ff0000ffffff00, 0x00ff0000ffffffff, 0x00ff00ff00000000,
163 0x00ff00ff000000ff, 0x00ff00ff0000ff00, 0x00ff00ff0000ffff,
164 0x00ff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00,
165 0x00ff00ff00ffffff, 0x00ff00ffff000000, 0x00ff00ffff0000ff,
166 0x00ff00ffff00ff00, 0x00ff00ffff00ffff, 0x00ff00ffffff0000,
167 0x00ff00ffffff00ff, 0x00ff00ffffffff00, 0x00ff00ffffffffff,
168 0x00ffff0000000000, 0x00ffff00000000ff, 0x00ffff000000ff00,
169 0x00ffff000000ffff, 0x00ffff0000ff0000, 0x00ffff0000ff00ff,
170 0x00ffff0000ffff00, 0x00ffff0000ffffff, 0x00ffff00ff000000,
171 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0x00ffff00ff00ffff,
172 0x00ffff00ffff0000, 0x00ffff00ffff00ff, 0x00ffff00ffffff00,
173 0x00ffff00ffffffff, 0x00ffffff00000000, 0x00ffffff000000ff,
174 0x00ffffff0000ff00, 0x00ffffff0000ffff, 0x00ffffff00ff0000,
175 0x00ffffff00ff00ff, 0x00ffffff00ffff00, 0x00ffffff00ffffff,
176 0x00ffffffff000000, 0x00ffffffff0000ff, 0x00ffffffff00ff00,
177 0x00ffffffff00ffff, 0x00ffffffffff0000, 0x00ffffffffff00ff,
178 0x00ffffffffffff00, 0x00ffffffffffffff, 0xff00000000000000,
179 0xff000000000000ff, 0xff0000000000ff00, 0xff0000000000ffff,
180 0xff00000000ff0000, 0xff00000000ff00ff, 0xff00000000ffff00,
181 0xff00000000ffffff, 0xff000000ff000000, 0xff000000ff0000ff,
182 0xff000000ff00ff00, 0xff000000ff00ffff, 0xff000000ffff0000,
183 0xff000000ffff00ff, 0xff000000ffffff00, 0xff000000ffffffff,
184 0xff0000ff00000000, 0xff0000ff000000ff, 0xff0000ff0000ff00,
185 0xff0000ff0000ffff, 0xff0000ff00ff0000, 0xff0000ff00ff00ff,
186 0xff0000ff00ffff00, 0xff0000ff00ffffff, 0xff0000ffff000000,
187 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0xff0000ffff00ffff,
188 0xff0000ffffff0000, 0xff0000ffffff00ff, 0xff0000ffffffff00,
189 0xff0000ffffffffff, 0xff00ff0000000000, 0xff00ff00000000ff,
190 0xff00ff000000ff00, 0xff00ff000000ffff, 0xff00ff0000ff0000,
191 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0xff00ff0000ffffff,
192 0xff00ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00,
193 0xff00ff00ff00ffff, 0xff00ff00ffff0000, 0xff00ff00ffff00ff,
194 0xff00ff00ffffff00, 0xff00ff00ffffffff, 0xff00ffff00000000,
195 0xff00ffff000000ff, 0xff00ffff0000ff00, 0xff00ffff0000ffff,
196 0xff00ffff00ff0000, 0xff00ffff00ff00ff, 0xff00ffff00ffff00,
197 0xff00ffff00ffffff, 0xff00ffffff000000, 0xff00ffffff0000ff,
198 0xff00ffffff00ff00, 0xff00ffffff00ffff, 0xff00ffffffff0000,
199 0xff00ffffffff00ff, 0xff00ffffffffff00, 0xff00ffffffffffff,
200 0xffff000000000000, 0xffff0000000000ff, 0xffff00000000ff00,
201 0xffff00000000ffff, 0xffff000000ff0000, 0xffff000000ff00ff,
202 0xffff000000ffff00, 0xffff000000ffffff, 0xffff0000ff000000,
203 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0xffff0000ff00ffff,
204 0xffff0000ffff0000, 0xffff0000ffff00ff, 0xffff0000ffffff00,
205 0xffff0000ffffffff, 0xffff00ff00000000, 0xffff00ff000000ff,
206 0xffff00ff0000ff00, 0xffff00ff0000ffff, 0xffff00ff00ff0000,
207 0xffff00ff00ff00ff, 0xffff00ff00ffff00, 0xffff00ff00ffffff,
208 0xffff00ffff000000, 0xffff00ffff0000ff, 0xffff00ffff00ff00,
209 0xffff00ffff00ffff, 0xffff00ffffff0000, 0xffff00ffffff00ff,
210 0xffff00ffffffff00, 0xffff00ffffffffff, 0xffffff0000000000,
211 0xffffff00000000ff, 0xffffff000000ff00, 0xffffff000000ffff,
212 0xffffff0000ff0000, 0xffffff0000ff00ff, 0xffffff0000ffff00,
213 0xffffff0000ffffff, 0xffffff00ff000000, 0xffffff00ff0000ff,
214 0xffffff00ff00ff00, 0xffffff00ff00ffff, 0xffffff00ffff0000,
215 0xffffff00ffff00ff, 0xffffff00ffffff00, 0xffffff00ffffffff,
216 0xffffffff00000000, 0xffffffff000000ff, 0xffffffff0000ff00,
217 0xffffffff0000ffff, 0xffffffff00ff0000, 0xffffffff00ff00ff,
218 0xffffffff00ffff00, 0xffffffff00ffffff, 0xffffffffff000000,
219 0xffffffffff0000ff, 0xffffffffff00ff00, 0xffffffffff00ffff,
220 0xffffffffffff0000, 0xffffffffffff00ff, 0xffffffffffffff00,
221 0xffffffffffffffff,
222 };
223 return word[byte];
224 }
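
/*
 * For example, expand_pred_b(0x05) == 0x0000000000ff00ff: predicate
 * bits 0 and 2 select byte lanes 0 and 2 of the result mask.
 */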
225
226 /* Similarly for half-word elements.
227 * for (i = 0; i < 256; ++i) {
228 * unsigned long m = 0;
229 * if (i & 0xaa) {
230 * continue;
231 * }
232 * for (j = 0; j < 8; j += 2) {
233 * if ((i >> j) & 1) {
234 * m |= 0xfffful << (j << 3);
235 * }
236 * }
237 * printf("[0x%x] = 0x%016lx,\n", i, m);
238 * }
239 */
240 static inline uint64_t expand_pred_h(uint8_t byte)
241 {
242 static const uint64_t word[] = {
243 [0x01] = 0x000000000000ffff, [0x04] = 0x00000000ffff0000,
244 [0x05] = 0x00000000ffffffff, [0x10] = 0x0000ffff00000000,
245 [0x11] = 0x0000ffff0000ffff, [0x14] = 0x0000ffffffff0000,
246 [0x15] = 0x0000ffffffffffff, [0x40] = 0xffff000000000000,
247 [0x41] = 0xffff00000000ffff, [0x44] = 0xffff0000ffff0000,
248 [0x45] = 0xffff0000ffffffff, [0x50] = 0xffffffff00000000,
249 [0x51] = 0xffffffff0000ffff, [0x54] = 0xffffffffffff0000,
250 [0x55] = 0xffffffffffffffff,
251 };
252 return word[byte & 0x55];
253 }
254
255 /* Similarly for single word elements. */
256 static inline uint64_t expand_pred_s(uint8_t byte)
257 {
258 static const uint64_t word[] = {
259 [0x01] = 0x00000000ffffffffull,
260 [0x10] = 0xffffffff00000000ull,
261 [0x11] = 0xffffffffffffffffull,
262 };
263 return word[byte & 0x11];
264 }
265
266 /* Swap 16-bit words within a 32-bit word. */
267 static inline uint32_t hswap32(uint32_t h)
268 {
269 return rol32(h, 16);
270 }
271
272 /* Swap 16-bit words within a 64-bit word. */
273 static inline uint64_t hswap64(uint64_t h)
274 {
275 uint64_t m = 0x0000ffff0000ffffull;
276 h = rol64(h, 32);
277 return ((h & m) << 16) | ((h >> 16) & m);
278 }
279
280 /* Swap 32-bit words within a 64-bit word. */
281 static inline uint64_t wswap64(uint64_t h)
282 {
283 return rol64(h, 32);
284 }
285
286 #define LOGICAL_PPPP(NAME, FUNC) \
287 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
288 { \
289 uintptr_t opr_sz = simd_oprsz(desc); \
290 uint64_t *d = vd, *n = vn, *m = vm, *g = vg; \
291 uintptr_t i; \
292 for (i = 0; i < opr_sz / 8; ++i) { \
293 d[i] = FUNC(n[i], m[i], g[i]); \
294 } \
295 }
296
297 #define DO_AND(N, M, G) (((N) & (M)) & (G))
298 #define DO_BIC(N, M, G) (((N) & ~(M)) & (G))
299 #define DO_EOR(N, M, G) (((N) ^ (M)) & (G))
300 #define DO_ORR(N, M, G) (((N) | (M)) & (G))
301 #define DO_ORN(N, M, G) (((N) | ~(M)) & (G))
302 #define DO_NOR(N, M, G) (~((N) | (M)) & (G))
303 #define DO_NAND(N, M, G) (~((N) & (M)) & (G))
304 #define DO_SEL(N, M, G) (((N) & (G)) | ((M) & ~(G)))
305
306 LOGICAL_PPPP(sve_and_pppp, DO_AND)
307 LOGICAL_PPPP(sve_bic_pppp, DO_BIC)
308 LOGICAL_PPPP(sve_eor_pppp, DO_EOR)
309 LOGICAL_PPPP(sve_sel_pppp, DO_SEL)
310 LOGICAL_PPPP(sve_orr_pppp, DO_ORR)
311 LOGICAL_PPPP(sve_orn_pppp, DO_ORN)
312 LOGICAL_PPPP(sve_nor_pppp, DO_NOR)
313 LOGICAL_PPPP(sve_nand_pppp, DO_NAND)
314
315 #undef DO_AND
316 #undef DO_BIC
317 #undef DO_EOR
318 #undef DO_ORR
319 #undef DO_ORN
320 #undef DO_NOR
321 #undef DO_NAND
322 #undef DO_SEL
323 #undef LOGICAL_PPPP
324
325 /* Fully general three-operand expander, controlled by a predicate.
326 * This is complicated by the host-endian storage of the register file.
327 */
328 /* ??? I don't expect the compiler could ever vectorize this itself.
329 * With some tables we can convert bit masks to byte masks, and with
330 * extra care wrt byte/word ordering we could use gcc generic vectors
331 * and do 16 bytes at a time.
332 */
333 #define DO_ZPZZ(NAME, TYPE, H, OP) \
334 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
335 { \
336 intptr_t i, opr_sz = simd_oprsz(desc); \
337 for (i = 0; i < opr_sz; ) { \
338 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
339 do { \
340 if (pg & 1) { \
341 TYPE nn = *(TYPE *)(vn + H(i)); \
342 TYPE mm = *(TYPE *)(vm + H(i)); \
343 *(TYPE *)(vd + H(i)) = OP(nn, mm); \
344 } \
345 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
346 } while (i & 15); \
347 } \
348 }
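
/*
 * The governing predicate has one bit per vector byte, so for elements
 * wider than a byte only the least significant predicate bit of each
 * element is tested: the expander shifts pg right by sizeof(TYPE) per
 * element and lets (pg & 1) decide whether the result is stored.
 */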
349
350 /* Similarly, specialized for 64-bit operands. */
351 #define DO_ZPZZ_D(NAME, TYPE, OP) \
352 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
353 { \
354 intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
355 TYPE *d = vd, *n = vn, *m = vm; \
356 uint8_t *pg = vg; \
357 for (i = 0; i < opr_sz; i += 1) { \
358 if (pg[H1(i)] & 1) { \
359 TYPE nn = n[i], mm = m[i]; \
360 d[i] = OP(nn, mm); \
361 } \
362 } \
363 }
364
365 #define DO_AND(N, M) (N & M)
366 #define DO_EOR(N, M) (N ^ M)
367 #define DO_ORR(N, M) (N | M)
368 #define DO_BIC(N, M) (N & ~M)
369 #define DO_ADD(N, M) (N + M)
370 #define DO_SUB(N, M) (N - M)
371 #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
372 #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
373 #define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))
374 #define DO_MUL(N, M) (N * M)
375
376
377 /*
378 * We must avoid the C undefined behaviour cases: division by
379 * zero and signed division of INT_MIN by -1. Both of these
380 * have architecturally defined required results for Arm.
381 * We special case all signed divisions by -1 to avoid having
382 * to deduce the minimum integer for the type involved.
383 */
384 #define DO_SDIV(N, M) (unlikely(M == 0) ? 0 : unlikely(M == -1) ? -N : N / M)
385 #define DO_UDIV(N, M) (unlikely(M == 0) ? 0 : N / M)
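
/*
 * For example, division by zero yields 0 for both SDIV and UDIV, and
 * DO_SDIV(INT32_MIN, -1) takes the -N path, giving INT32_MIN again
 * once the negation wraps; both are the architecturally required
 * results.
 */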
386
387 DO_ZPZZ(sve_and_zpzz_b, uint8_t, H1, DO_AND)
388 DO_ZPZZ(sve_and_zpzz_h, uint16_t, H1_2, DO_AND)
389 DO_ZPZZ(sve_and_zpzz_s, uint32_t, H1_4, DO_AND)
390 DO_ZPZZ_D(sve_and_zpzz_d, uint64_t, DO_AND)
391
392 DO_ZPZZ(sve_orr_zpzz_b, uint8_t, H1, DO_ORR)
393 DO_ZPZZ(sve_orr_zpzz_h, uint16_t, H1_2, DO_ORR)
394 DO_ZPZZ(sve_orr_zpzz_s, uint32_t, H1_4, DO_ORR)
395 DO_ZPZZ_D(sve_orr_zpzz_d, uint64_t, DO_ORR)
396
397 DO_ZPZZ(sve_eor_zpzz_b, uint8_t, H1, DO_EOR)
398 DO_ZPZZ(sve_eor_zpzz_h, uint16_t, H1_2, DO_EOR)
399 DO_ZPZZ(sve_eor_zpzz_s, uint32_t, H1_4, DO_EOR)
400 DO_ZPZZ_D(sve_eor_zpzz_d, uint64_t, DO_EOR)
401
402 DO_ZPZZ(sve_bic_zpzz_b, uint8_t, H1, DO_BIC)
403 DO_ZPZZ(sve_bic_zpzz_h, uint16_t, H1_2, DO_BIC)
404 DO_ZPZZ(sve_bic_zpzz_s, uint32_t, H1_4, DO_BIC)
405 DO_ZPZZ_D(sve_bic_zpzz_d, uint64_t, DO_BIC)
406
407 DO_ZPZZ(sve_add_zpzz_b, uint8_t, H1, DO_ADD)
408 DO_ZPZZ(sve_add_zpzz_h, uint16_t, H1_2, DO_ADD)
409 DO_ZPZZ(sve_add_zpzz_s, uint32_t, H1_4, DO_ADD)
410 DO_ZPZZ_D(sve_add_zpzz_d, uint64_t, DO_ADD)
411
412 DO_ZPZZ(sve_sub_zpzz_b, uint8_t, H1, DO_SUB)
413 DO_ZPZZ(sve_sub_zpzz_h, uint16_t, H1_2, DO_SUB)
414 DO_ZPZZ(sve_sub_zpzz_s, uint32_t, H1_4, DO_SUB)
415 DO_ZPZZ_D(sve_sub_zpzz_d, uint64_t, DO_SUB)
416
417 DO_ZPZZ(sve_smax_zpzz_b, int8_t, H1, DO_MAX)
418 DO_ZPZZ(sve_smax_zpzz_h, int16_t, H1_2, DO_MAX)
419 DO_ZPZZ(sve_smax_zpzz_s, int32_t, H1_4, DO_MAX)
420 DO_ZPZZ_D(sve_smax_zpzz_d, int64_t, DO_MAX)
421
422 DO_ZPZZ(sve_umax_zpzz_b, uint8_t, H1, DO_MAX)
423 DO_ZPZZ(sve_umax_zpzz_h, uint16_t, H1_2, DO_MAX)
424 DO_ZPZZ(sve_umax_zpzz_s, uint32_t, H1_4, DO_MAX)
425 DO_ZPZZ_D(sve_umax_zpzz_d, uint64_t, DO_MAX)
426
427 DO_ZPZZ(sve_smin_zpzz_b, int8_t, H1, DO_MIN)
428 DO_ZPZZ(sve_smin_zpzz_h, int16_t, H1_2, DO_MIN)
429 DO_ZPZZ(sve_smin_zpzz_s, int32_t, H1_4, DO_MIN)
430 DO_ZPZZ_D(sve_smin_zpzz_d, int64_t, DO_MIN)
431
432 DO_ZPZZ(sve_umin_zpzz_b, uint8_t, H1, DO_MIN)
433 DO_ZPZZ(sve_umin_zpzz_h, uint16_t, H1_2, DO_MIN)
434 DO_ZPZZ(sve_umin_zpzz_s, uint32_t, H1_4, DO_MIN)
435 DO_ZPZZ_D(sve_umin_zpzz_d, uint64_t, DO_MIN)
436
437 DO_ZPZZ(sve_sabd_zpzz_b, int8_t, H1, DO_ABD)
438 DO_ZPZZ(sve_sabd_zpzz_h, int16_t, H1_2, DO_ABD)
439 DO_ZPZZ(sve_sabd_zpzz_s, int32_t, H1_4, DO_ABD)
440 DO_ZPZZ_D(sve_sabd_zpzz_d, int64_t, DO_ABD)
441
442 DO_ZPZZ(sve_uabd_zpzz_b, uint8_t, H1, DO_ABD)
443 DO_ZPZZ(sve_uabd_zpzz_h, uint16_t, H1_2, DO_ABD)
444 DO_ZPZZ(sve_uabd_zpzz_s, uint32_t, H1_4, DO_ABD)
445 DO_ZPZZ_D(sve_uabd_zpzz_d, uint64_t, DO_ABD)
446
447 /* Because the computation type is at least twice as large as required,
448 these work for both signed and unsigned source types. */
449 static inline uint8_t do_mulh_b(int32_t n, int32_t m)
450 {
451 return (n * m) >> 8;
452 }
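
/*
 * For example, with signed inputs (int8_t)-1 * (int8_t)-1 == 1, whose
 * high byte is 0, while with unsigned inputs 0xff * 0xff == 0xfe01,
 * whose high byte is 0xfe.  The same do_mulh_b serves both because the
 * instantiations sign- or zero-extend the sources into the int32_t
 * arguments.
 */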
453
454 static inline uint16_t do_mulh_h(int32_t n, int32_t m)
455 {
456 return (n * m) >> 16;
457 }
458
459 static inline uint32_t do_mulh_s(int64_t n, int64_t m)
460 {
461 return (n * m) >> 32;
462 }
463
464 static inline uint64_t do_smulh_d(uint64_t n, uint64_t m)
465 {
466 uint64_t lo, hi;
467 muls64(&lo, &hi, n, m);
468 return hi;
469 }
470
471 static inline uint64_t do_umulh_d(uint64_t n, uint64_t m)
472 {
473 uint64_t lo, hi;
474 mulu64(&lo, &hi, n, m);
475 return hi;
476 }
477
478 DO_ZPZZ(sve_mul_zpzz_b, uint8_t, H1, DO_MUL)
479 DO_ZPZZ(sve_mul_zpzz_h, uint16_t, H1_2, DO_MUL)
480 DO_ZPZZ(sve_mul_zpzz_s, uint32_t, H1_4, DO_MUL)
481 DO_ZPZZ_D(sve_mul_zpzz_d, uint64_t, DO_MUL)
482
483 DO_ZPZZ(sve_smulh_zpzz_b, int8_t, H1, do_mulh_b)
484 DO_ZPZZ(sve_smulh_zpzz_h, int16_t, H1_2, do_mulh_h)
485 DO_ZPZZ(sve_smulh_zpzz_s, int32_t, H1_4, do_mulh_s)
486 DO_ZPZZ_D(sve_smulh_zpzz_d, uint64_t, do_smulh_d)
487
488 DO_ZPZZ(sve_umulh_zpzz_b, uint8_t, H1, do_mulh_b)
489 DO_ZPZZ(sve_umulh_zpzz_h, uint16_t, H1_2, do_mulh_h)
490 DO_ZPZZ(sve_umulh_zpzz_s, uint32_t, H1_4, do_mulh_s)
491 DO_ZPZZ_D(sve_umulh_zpzz_d, uint64_t, do_umulh_d)
492
493 DO_ZPZZ(sve_sdiv_zpzz_s, int32_t, H1_4, DO_SDIV)
494 DO_ZPZZ_D(sve_sdiv_zpzz_d, int64_t, DO_SDIV)
495
496 DO_ZPZZ(sve_udiv_zpzz_s, uint32_t, H1_4, DO_UDIV)
497 DO_ZPZZ_D(sve_udiv_zpzz_d, uint64_t, DO_UDIV)
498
499 /* Note that all bits of the shift are significant
500 and not modulo the element size. */
501 #define DO_ASR(N, M) (N >> MIN(M, sizeof(N) * 8 - 1))
502 #define DO_LSR(N, M) (M < sizeof(N) * 8 ? N >> M : 0)
503 #define DO_LSL(N, M) (M < sizeof(N) * 8 ? N << M : 0)
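
/*
 * For example, DO_LSR and DO_LSL yield 0 once the shift count reaches
 * the element width, while DO_ASR clamps the count, so an 8-bit -4
 * shifted right by 200 still yields -1 (all sign bits).
 */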
504
505 DO_ZPZZ(sve_asr_zpzz_b, int8_t, H1, DO_ASR)
506 DO_ZPZZ(sve_lsr_zpzz_b, uint8_t, H1, DO_LSR)
507 DO_ZPZZ(sve_lsl_zpzz_b, uint8_t, H1, DO_LSL)
508
509 DO_ZPZZ(sve_asr_zpzz_h, int16_t, H1_2, DO_ASR)
510 DO_ZPZZ(sve_lsr_zpzz_h, uint16_t, H1_2, DO_LSR)
511 DO_ZPZZ(sve_lsl_zpzz_h, uint16_t, H1_2, DO_LSL)
512
513 DO_ZPZZ(sve_asr_zpzz_s, int32_t, H1_4, DO_ASR)
514 DO_ZPZZ(sve_lsr_zpzz_s, uint32_t, H1_4, DO_LSR)
515 DO_ZPZZ(sve_lsl_zpzz_s, uint32_t, H1_4, DO_LSL)
516
517 DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR)
518 DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR)
519 DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL)
520
521 static inline uint16_t do_sadalp_h(int16_t n, int16_t m)
522 {
523 int8_t n1 = n, n2 = n >> 8;
524 return m + n1 + n2;
525 }
526
527 static inline uint32_t do_sadalp_s(int32_t n, int32_t m)
528 {
529 int16_t n1 = n, n2 = n >> 16;
530 return m + n1 + n2;
531 }
532
533 static inline uint64_t do_sadalp_d(int64_t n, int64_t m)
534 {
535 int32_t n1 = n, n2 = n >> 32;
536 return m + n1 + n2;
537 }
538
539 DO_ZPZZ(sve2_sadalp_zpzz_h, int16_t, H1_2, do_sadalp_h)
540 DO_ZPZZ(sve2_sadalp_zpzz_s, int32_t, H1_4, do_sadalp_s)
541 DO_ZPZZ_D(sve2_sadalp_zpzz_d, int64_t, do_sadalp_d)
542
543 static inline uint16_t do_uadalp_h(uint16_t n, uint16_t m)
544 {
545 uint8_t n1 = n, n2 = n >> 8;
546 return m + n1 + n2;
547 }
548
549 static inline uint32_t do_uadalp_s(uint32_t n, uint32_t m)
550 {
551 uint16_t n1 = n, n2 = n >> 16;
552 return m + n1 + n2;
553 }
554
555 static inline uint64_t do_uadalp_d(uint64_t n, uint64_t m)
556 {
557 uint32_t n1 = n, n2 = n >> 32;
558 return m + n1 + n2;
559 }
560
561 DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h)
562 DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s)
563 DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d)
564
565 #define do_srshl_b(n, m) do_sqrshl_bhs(n, m, 8, true, NULL)
566 #define do_srshl_h(n, m) do_sqrshl_bhs(n, m, 16, true, NULL)
567 #define do_srshl_s(n, m) do_sqrshl_bhs(n, m, 32, true, NULL)
568 #define do_srshl_d(n, m) do_sqrshl_d(n, m, true, NULL)
569
570 DO_ZPZZ(sve2_srshl_zpzz_b, int8_t, H1, do_srshl_b)
571 DO_ZPZZ(sve2_srshl_zpzz_h, int16_t, H1_2, do_srshl_h)
572 DO_ZPZZ(sve2_srshl_zpzz_s, int32_t, H1_4, do_srshl_s)
573 DO_ZPZZ_D(sve2_srshl_zpzz_d, int64_t, do_srshl_d)
574
575 #define do_urshl_b(n, m) do_uqrshl_bhs(n, (int8_t)m, 8, true, NULL)
576 #define do_urshl_h(n, m) do_uqrshl_bhs(n, (int16_t)m, 16, true, NULL)
577 #define do_urshl_s(n, m) do_uqrshl_bhs(n, m, 32, true, NULL)
578 #define do_urshl_d(n, m) do_uqrshl_d(n, m, true, NULL)
579
580 DO_ZPZZ(sve2_urshl_zpzz_b, uint8_t, H1, do_urshl_b)
581 DO_ZPZZ(sve2_urshl_zpzz_h, uint16_t, H1_2, do_urshl_h)
582 DO_ZPZZ(sve2_urshl_zpzz_s, uint32_t, H1_4, do_urshl_s)
583 DO_ZPZZ_D(sve2_urshl_zpzz_d, uint64_t, do_urshl_d)
584
585 /*
586 * Unlike the NEON and AdvSIMD versions, there is no QC bit to set.
587 * We pass in a pointer to a dummy saturation field to trigger
588 * the saturating arithmetic but discard the information about
589 * whether it has occurred.
590 */
591 #define do_sqshl_b(n, m) \
592 ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, false, &discard); })
593 #define do_sqshl_h(n, m) \
594 ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, false, &discard); })
595 #define do_sqshl_s(n, m) \
596 ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, false, &discard); })
597 #define do_sqshl_d(n, m) \
598 ({ uint32_t discard; do_sqrshl_d(n, m, false, &discard); })
599
600 DO_ZPZZ(sve2_sqshl_zpzz_b, int8_t, H1, do_sqshl_b)
601 DO_ZPZZ(sve2_sqshl_zpzz_h, int16_t, H1_2, do_sqshl_h)
602 DO_ZPZZ(sve2_sqshl_zpzz_s, int32_t, H1_4, do_sqshl_s)
603 DO_ZPZZ_D(sve2_sqshl_zpzz_d, int64_t, do_sqshl_d)
604
605 #define do_uqshl_b(n, m) \
606 ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
607 #define do_uqshl_h(n, m) \
608 ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
609 #define do_uqshl_s(n, m) \
610 ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, false, &discard); })
611 #define do_uqshl_d(n, m) \
612 ({ uint32_t discard; do_uqrshl_d(n, m, false, &discard); })
613
614 DO_ZPZZ(sve2_uqshl_zpzz_b, uint8_t, H1, do_uqshl_b)
615 DO_ZPZZ(sve2_uqshl_zpzz_h, uint16_t, H1_2, do_uqshl_h)
616 DO_ZPZZ(sve2_uqshl_zpzz_s, uint32_t, H1_4, do_uqshl_s)
617 DO_ZPZZ_D(sve2_uqshl_zpzz_d, uint64_t, do_uqshl_d)
618
619 #define do_sqrshl_b(n, m) \
620 ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, true, &discard); })
621 #define do_sqrshl_h(n, m) \
622 ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, true, &discard); })
623 #define do_sqrshl_s(n, m) \
624 ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, true, &discard); })
625 #define do_sqrshl_d(n, m) \
626 ({ uint32_t discard; do_sqrshl_d(n, m, true, &discard); })
627
628 DO_ZPZZ(sve2_sqrshl_zpzz_b, int8_t, H1, do_sqrshl_b)
629 DO_ZPZZ(sve2_sqrshl_zpzz_h, int16_t, H1_2, do_sqrshl_h)
630 DO_ZPZZ(sve2_sqrshl_zpzz_s, int32_t, H1_4, do_sqrshl_s)
631 DO_ZPZZ_D(sve2_sqrshl_zpzz_d, int64_t, do_sqrshl_d)
632
633 #undef do_sqrshl_d
634
635 #define do_uqrshl_b(n, m) \
636 ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, true, &discard); })
637 #define do_uqrshl_h(n, m) \
638 ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, true, &discard); })
639 #define do_uqrshl_s(n, m) \
640 ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, true, &discard); })
641 #define do_uqrshl_d(n, m) \
642 ({ uint32_t discard; do_uqrshl_d(n, m, true, &discard); })
643
644 DO_ZPZZ(sve2_uqrshl_zpzz_b, uint8_t, H1, do_uqrshl_b)
645 DO_ZPZZ(sve2_uqrshl_zpzz_h, uint16_t, H1_2, do_uqrshl_h)
646 DO_ZPZZ(sve2_uqrshl_zpzz_s, uint32_t, H1_4, do_uqrshl_s)
647 DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d)
648
649 #undef do_uqrshl_d
650
651 #define DO_HADD_BHS(n, m) (((int64_t)n + m) >> 1)
652 #define DO_HADD_D(n, m) ((n >> 1) + (m >> 1) + (n & m & 1))
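
/*
 * The _D form avoids a 65-bit intermediate: n + m equals
 * 2*(n >> 1) + 2*(m >> 1) + (n & 1) + (m & 1), so the halved sum is
 * (n >> 1) + (m >> 1) plus a carry only when both low bits are set.
 */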
653
654 DO_ZPZZ(sve2_shadd_zpzz_b, int8_t, H1, DO_HADD_BHS)
655 DO_ZPZZ(sve2_shadd_zpzz_h, int16_t, H1_2, DO_HADD_BHS)
656 DO_ZPZZ(sve2_shadd_zpzz_s, int32_t, H1_4, DO_HADD_BHS)
657 DO_ZPZZ_D(sve2_shadd_zpzz_d, int64_t, DO_HADD_D)
658
659 DO_ZPZZ(sve2_uhadd_zpzz_b, uint8_t, H1, DO_HADD_BHS)
660 DO_ZPZZ(sve2_uhadd_zpzz_h, uint16_t, H1_2, DO_HADD_BHS)
661 DO_ZPZZ(sve2_uhadd_zpzz_s, uint32_t, H1_4, DO_HADD_BHS)
662 DO_ZPZZ_D(sve2_uhadd_zpzz_d, uint64_t, DO_HADD_D)
663
664 #define DO_RHADD_BHS(n, m) (((int64_t)n + m + 1) >> 1)
665 #define DO_RHADD_D(n, m) ((n >> 1) + (m >> 1) + ((n | m) & 1))
666
667 DO_ZPZZ(sve2_srhadd_zpzz_b, int8_t, H1, DO_RHADD_BHS)
668 DO_ZPZZ(sve2_srhadd_zpzz_h, int16_t, H1_2, DO_RHADD_BHS)
669 DO_ZPZZ(sve2_srhadd_zpzz_s, int32_t, H1_4, DO_RHADD_BHS)
670 DO_ZPZZ_D(sve2_srhadd_zpzz_d, int64_t, DO_RHADD_D)
671
672 DO_ZPZZ(sve2_urhadd_zpzz_b, uint8_t, H1, DO_RHADD_BHS)
673 DO_ZPZZ(sve2_urhadd_zpzz_h, uint16_t, H1_2, DO_RHADD_BHS)
674 DO_ZPZZ(sve2_urhadd_zpzz_s, uint32_t, H1_4, DO_RHADD_BHS)
675 DO_ZPZZ_D(sve2_urhadd_zpzz_d, uint64_t, DO_RHADD_D)
676
677 #define DO_HSUB_BHS(n, m) (((int64_t)n - m) >> 1)
678 #define DO_HSUB_D(n, m) ((n >> 1) - (m >> 1) - (~n & m & 1))
679
680 DO_ZPZZ(sve2_shsub_zpzz_b, int8_t, H1, DO_HSUB_BHS)
681 DO_ZPZZ(sve2_shsub_zpzz_h, int16_t, H1_2, DO_HSUB_BHS)
682 DO_ZPZZ(sve2_shsub_zpzz_s, int32_t, H1_4, DO_HSUB_BHS)
683 DO_ZPZZ_D(sve2_shsub_zpzz_d, int64_t, DO_HSUB_D)
684
685 DO_ZPZZ(sve2_uhsub_zpzz_b, uint8_t, H1, DO_HSUB_BHS)
686 DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS)
687 DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS)
688 DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
689
690 static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max)
691 {
692 return val >= max ? max : val <= min ? min : val;
693 }
694
695 #define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX)
696 #define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX)
697 #define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX)
698
699 static inline int64_t do_sqadd_d(int64_t n, int64_t m)
700 {
701 int64_t r = n + m;
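    /*
     * Overflow occurred iff the operands have the same sign and the
     * result's sign differs: (n ^ m) has its top bit clear for
     * same-sign inputs, and (r ^ n) has its top bit set when the
     * result changed sign relative to n.
     */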
702 if (((r ^ n) & ~(n ^ m)) < 0) {
703 /* Signed overflow. */
704 return r < 0 ? INT64_MAX : INT64_MIN;
705 }
706 return r;
707 }
708
709 DO_ZPZZ(sve2_sqadd_zpzz_b, int8_t, H1, DO_SQADD_B)
710 DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H)
711 DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S)
712 DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d)
713
714 #define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX)
715 #define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX)
716 #define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX)
717
718 static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m)
719 {
720 uint64_t r = n + m;
721 return r < n ? UINT64_MAX : r;
722 }
723
724 DO_ZPZZ(sve2_uqadd_zpzz_b, uint8_t, H1, DO_UQADD_B)
725 DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H)
726 DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S)
727 DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d)
728
729 #define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX)
730 #define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX)
731 #define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX)
732
733 static inline int64_t do_sqsub_d(int64_t n, int64_t m)
734 {
735 int64_t r = n - m;
736 if (((r ^ n) & (n ^ m)) < 0) {
737 /* Signed overflow. */
738 return r < 0 ? INT64_MAX : INT64_MIN;
739 }
740 return r;
741 }
742
743 DO_ZPZZ(sve2_sqsub_zpzz_b, int8_t, H1, DO_SQSUB_B)
744 DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H)
745 DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S)
746 DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d)
747
748 #define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX)
749 #define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX)
750 #define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX)
751
752 static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m)
753 {
754 return n > m ? n - m : 0;
755 }
756
757 DO_ZPZZ(sve2_uqsub_zpzz_b, uint8_t, H1, DO_UQSUB_B)
758 DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H)
759 DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S)
760 DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d)
761
762 #define DO_SUQADD_B(n, m) \
763 do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX)
764 #define DO_SUQADD_H(n, m) \
765 do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX)
766 #define DO_SUQADD_S(n, m) \
767 do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX)
768
769 static inline int64_t do_suqadd_d(int64_t n, uint64_t m)
770 {
771 uint64_t r = n + m;
772
773 if (n < 0) {
774 /* Note that m - abs(n) cannot underflow. */
775 if (r > INT64_MAX) {
776 /* Result is either very large positive or negative. */
777 if (m > -n) {
778 /* m > abs(n), so r is a very large positive. */
779 return INT64_MAX;
780 }
781 /* Result is negative. */
782 }
783 } else {
784 /* Both inputs are positive: check for overflow. */
785 if (r < m || r > INT64_MAX) {
786 return INT64_MAX;
787 }
788 }
789 return r;
790 }
791
792 DO_ZPZZ(sve2_suqadd_zpzz_b, uint8_t, H1, DO_SUQADD_B)
793 DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H)
794 DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S)
795 DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d)
796
797 #define DO_USQADD_B(n, m) \
798 do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX)
799 #define DO_USQADD_H(n, m) \
800 do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX)
801 #define DO_USQADD_S(n, m) \
802 do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX)
803
804 static inline uint64_t do_usqadd_d(uint64_t n, int64_t m)
805 {
806 uint64_t r = n + m;
807
808 if (m < 0) {
809 return n < -m ? 0 : r;
810 }
811 return r < n ? UINT64_MAX : r;
812 }
813
814 DO_ZPZZ(sve2_usqadd_zpzz_b, uint8_t, H1, DO_USQADD_B)
815 DO_ZPZZ(sve2_usqadd_zpzz_h, uint16_t, H1_2, DO_USQADD_H)
816 DO_ZPZZ(sve2_usqadd_zpzz_s, uint32_t, H1_4, DO_USQADD_S)
817 DO_ZPZZ_D(sve2_usqadd_zpzz_d, uint64_t, do_usqadd_d)
818
819 #undef DO_ZPZZ
820 #undef DO_ZPZZ_D
821
822 /*
823 * Three operand expander, operating on element pairs.
824  * If the slot I is even, the elements come from VN {I, I+1}.
825  * If the slot I is odd, the elements come from VM {I-1, I}.
826 * Load all of the input elements in each pair before overwriting output.
827 */
828 #define DO_ZPZZ_PAIR(NAME, TYPE, H, OP) \
829 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
830 { \
831 intptr_t i, opr_sz = simd_oprsz(desc); \
832 for (i = 0; i < opr_sz; ) { \
833 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
834 do { \
835 TYPE n0 = *(TYPE *)(vn + H(i)); \
836 TYPE m0 = *(TYPE *)(vm + H(i)); \
837 TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
838 TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
839 if (pg & 1) { \
840 *(TYPE *)(vd + H(i)) = OP(n0, n1); \
841 } \
842 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
843 if (pg & 1) { \
844 *(TYPE *)(vd + H(i)) = OP(m0, m1); \
845 } \
846 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
847 } while (i & 15); \
848 } \
849 }
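
/*
 * For example, with byte elements and an all-true predicate this
 * produces d[0] = OP(n[0], n[1]), d[1] = OP(m[0], m[1]),
 * d[2] = OP(n[2], n[3]), d[3] = OP(m[2], m[3]), and so on.
 */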
850
851 /* Similarly, specialized for 64-bit operands. */
852 #define DO_ZPZZ_PAIR_D(NAME, TYPE, OP) \
853 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
854 { \
855 intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
856 TYPE *d = vd, *n = vn, *m = vm; \
857 uint8_t *pg = vg; \
858 for (i = 0; i < opr_sz; i += 2) { \
859 TYPE n0 = n[i], n1 = n[i + 1]; \
860 TYPE m0 = m[i], m1 = m[i + 1]; \
861 if (pg[H1(i)] & 1) { \
862 d[i] = OP(n0, n1); \
863 } \
864 if (pg[H1(i + 1)] & 1) { \
865 d[i + 1] = OP(m0, m1); \
866 } \
867 } \
868 }
869
870 DO_ZPZZ_PAIR(sve2_addp_zpzz_b, uint8_t, H1, DO_ADD)
871 DO_ZPZZ_PAIR(sve2_addp_zpzz_h, uint16_t, H1_2, DO_ADD)
872 DO_ZPZZ_PAIR(sve2_addp_zpzz_s, uint32_t, H1_4, DO_ADD)
873 DO_ZPZZ_PAIR_D(sve2_addp_zpzz_d, uint64_t, DO_ADD)
874
875 DO_ZPZZ_PAIR(sve2_umaxp_zpzz_b, uint8_t, H1, DO_MAX)
876 DO_ZPZZ_PAIR(sve2_umaxp_zpzz_h, uint16_t, H1_2, DO_MAX)
877 DO_ZPZZ_PAIR(sve2_umaxp_zpzz_s, uint32_t, H1_4, DO_MAX)
878 DO_ZPZZ_PAIR_D(sve2_umaxp_zpzz_d, uint64_t, DO_MAX)
879
880 DO_ZPZZ_PAIR(sve2_uminp_zpzz_b, uint8_t, H1, DO_MIN)
881 DO_ZPZZ_PAIR(sve2_uminp_zpzz_h, uint16_t, H1_2, DO_MIN)
882 DO_ZPZZ_PAIR(sve2_uminp_zpzz_s, uint32_t, H1_4, DO_MIN)
883 DO_ZPZZ_PAIR_D(sve2_uminp_zpzz_d, uint64_t, DO_MIN)
884
885 DO_ZPZZ_PAIR(sve2_smaxp_zpzz_b, int8_t, H1, DO_MAX)
886 DO_ZPZZ_PAIR(sve2_smaxp_zpzz_h, int16_t, H1_2, DO_MAX)
887 DO_ZPZZ_PAIR(sve2_smaxp_zpzz_s, int32_t, H1_4, DO_MAX)
888 DO_ZPZZ_PAIR_D(sve2_smaxp_zpzz_d, int64_t, DO_MAX)
889
890 DO_ZPZZ_PAIR(sve2_sminp_zpzz_b, int8_t, H1, DO_MIN)
891 DO_ZPZZ_PAIR(sve2_sminp_zpzz_h, int16_t, H1_2, DO_MIN)
892 DO_ZPZZ_PAIR(sve2_sminp_zpzz_s, int32_t, H1_4, DO_MIN)
893 DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
894
895 #undef DO_ZPZZ_PAIR
896 #undef DO_ZPZZ_PAIR_D
897
898 #define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \
899 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
900 void *status, uint32_t desc) \
901 { \
902 intptr_t i, opr_sz = simd_oprsz(desc); \
903 for (i = 0; i < opr_sz; ) { \
904 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
905 do { \
906 TYPE n0 = *(TYPE *)(vn + H(i)); \
907 TYPE m0 = *(TYPE *)(vm + H(i)); \
908 TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
909 TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
910 if (pg & 1) { \
911 *(TYPE *)(vd + H(i)) = OP(n0, n1, status); \
912 } \
913 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
914 if (pg & 1) { \
915 *(TYPE *)(vd + H(i)) = OP(m0, m1, status); \
916 } \
917 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
918 } while (i & 15); \
919 } \
920 }
921
922 DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_h, float16, H1_2, float16_add)
923 DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_s, float32, H1_4, float32_add)
924 DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_d, float64, , float64_add)
925
926 DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_h, float16, H1_2, float16_maxnum)
927 DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_s, float32, H1_4, float32_maxnum)
928 DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_d, float64, , float64_maxnum)
929
930 DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_h, float16, H1_2, float16_minnum)
931 DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_s, float32, H1_4, float32_minnum)
932 DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_d, float64, , float64_minnum)
933
934 DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_h, float16, H1_2, float16_max)
935 DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_s, float32, H1_4, float32_max)
936 DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_d, float64, , float64_max)
937
938 DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_h, float16, H1_2, float16_min)
939 DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_s, float32, H1_4, float32_min)
940 DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_d, float64, , float64_min)
941
942 #undef DO_ZPZZ_PAIR_FP
943
944 /* Three-operand expander, controlled by a predicate, in which the
945 * third operand is "wide". That is, for D = N op M, the same 64-bit
946 * value of M is used with all of the narrower values of N.
947 */
948 #define DO_ZPZW(NAME, TYPE, TYPEW, H, OP) \
949 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
950 { \
951 intptr_t i, opr_sz = simd_oprsz(desc); \
952 for (i = 0; i < opr_sz; ) { \
953 uint8_t pg = *(uint8_t *)(vg + H1(i >> 3)); \
954 TYPEW mm = *(TYPEW *)(vm + i); \
955 do { \
956 if (pg & 1) { \
957 TYPE nn = *(TYPE *)(vn + H(i)); \
958 *(TYPE *)(vd + H(i)) = OP(nn, mm); \
959 } \
960 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
961 } while (i & 7); \
962 } \
963 }
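
/*
 * For example, with byte elements each group of eight narrow elements
 * shares the single 64-bit value of M read from the overlapping
 * element of VM at the start of the inner loop.
 */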
964
965 DO_ZPZW(sve_asr_zpzw_b, int8_t, uint64_t, H1, DO_ASR)
966 DO_ZPZW(sve_lsr_zpzw_b, uint8_t, uint64_t, H1, DO_LSR)
967 DO_ZPZW(sve_lsl_zpzw_b, uint8_t, uint64_t, H1, DO_LSL)
968
969 DO_ZPZW(sve_asr_zpzw_h, int16_t, uint64_t, H1_2, DO_ASR)
970 DO_ZPZW(sve_lsr_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSR)
971 DO_ZPZW(sve_lsl_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSL)
972
973 DO_ZPZW(sve_asr_zpzw_s, int32_t, uint64_t, H1_4, DO_ASR)
974 DO_ZPZW(sve_lsr_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSR)
975 DO_ZPZW(sve_lsl_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSL)
976
977 #undef DO_ZPZW
978
979 /* Fully general two-operand expander, controlled by a predicate.
980 */
981 #define DO_ZPZ(NAME, TYPE, H, OP) \
982 void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
983 { \
984 intptr_t i, opr_sz = simd_oprsz(desc); \
985 for (i = 0; i < opr_sz; ) { \
986 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
987 do { \
988 if (pg & 1) { \
989 TYPE nn = *(TYPE *)(vn + H(i)); \
990 *(TYPE *)(vd + H(i)) = OP(nn); \
991 } \
992 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
993 } while (i & 15); \
994 } \
995 }
996
997 /* Similarly, specialized for 64-bit operands. */
998 #define DO_ZPZ_D(NAME, TYPE, OP) \
999 void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
1000 { \
1001 intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
1002 TYPE *d = vd, *n = vn; \
1003 uint8_t *pg = vg; \
1004 for (i = 0; i < opr_sz; i += 1) { \
1005 if (pg[H1(i)] & 1) { \
1006 TYPE nn = n[i]; \
1007 d[i] = OP(nn); \
1008 } \
1009 } \
1010 }
1011
1012 #define DO_CLS_B(N) (clrsb32(N) - 24)
1013 #define DO_CLS_H(N) (clrsb32(N) - 16)
1014
1015 DO_ZPZ(sve_cls_b, int8_t, H1, DO_CLS_B)
1016 DO_ZPZ(sve_cls_h, int16_t, H1_2, DO_CLS_H)
1017 DO_ZPZ(sve_cls_s, int32_t, H1_4, clrsb32)
1018 DO_ZPZ_D(sve_cls_d, int64_t, clrsb64)
1019
1020 #define DO_CLZ_B(N) (clz32(N) - 24)
1021 #define DO_CLZ_H(N) (clz32(N) - 16)
1022
1023 DO_ZPZ(sve_clz_b, uint8_t, H1, DO_CLZ_B)
1024 DO_ZPZ(sve_clz_h, uint16_t, H1_2, DO_CLZ_H)
1025 DO_ZPZ(sve_clz_s, uint32_t, H1_4, clz32)
1026 DO_ZPZ_D(sve_clz_d, uint64_t, clz64)
1027
1028 DO_ZPZ(sve_cnt_zpz_b, uint8_t, H1, ctpop8)
1029 DO_ZPZ(sve_cnt_zpz_h, uint16_t, H1_2, ctpop16)
1030 DO_ZPZ(sve_cnt_zpz_s, uint32_t, H1_4, ctpop32)
1031 DO_ZPZ_D(sve_cnt_zpz_d, uint64_t, ctpop64)
1032
1033 #define DO_CNOT(N) (N == 0)
1034
1035 DO_ZPZ(sve_cnot_b, uint8_t, H1, DO_CNOT)
1036 DO_ZPZ(sve_cnot_h, uint16_t, H1_2, DO_CNOT)
1037 DO_ZPZ(sve_cnot_s, uint32_t, H1_4, DO_CNOT)
1038 DO_ZPZ_D(sve_cnot_d, uint64_t, DO_CNOT)
1039
1040 #define DO_FABS(N) (N & ((__typeof(N))-1 >> 1))
1041
1042 DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS)
1043 DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS)
1044 DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS)
1045
1046 #define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1))
1047
1048 DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG)
1049 DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG)
1050 DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG)
1051
1052 #define DO_NOT(N) (~N)
1053
1054 DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT)
1055 DO_ZPZ(sve_not_zpz_h, uint16_t, H1_2, DO_NOT)
1056 DO_ZPZ(sve_not_zpz_s, uint32_t, H1_4, DO_NOT)
1057 DO_ZPZ_D(sve_not_zpz_d, uint64_t, DO_NOT)
1058
1059 #define DO_SXTB(N) ((int8_t)N)
1060 #define DO_SXTH(N) ((int16_t)N)
1061 #define DO_SXTS(N) ((int32_t)N)
1062 #define DO_UXTB(N) ((uint8_t)N)
1063 #define DO_UXTH(N) ((uint16_t)N)
1064 #define DO_UXTS(N) ((uint32_t)N)
1065
1066 DO_ZPZ(sve_sxtb_h, uint16_t, H1_2, DO_SXTB)
1067 DO_ZPZ(sve_sxtb_s, uint32_t, H1_4, DO_SXTB)
1068 DO_ZPZ(sve_sxth_s, uint32_t, H1_4, DO_SXTH)
1069 DO_ZPZ_D(sve_sxtb_d, uint64_t, DO_SXTB)
1070 DO_ZPZ_D(sve_sxth_d, uint64_t, DO_SXTH)
1071 DO_ZPZ_D(sve_sxtw_d, uint64_t, DO_SXTS)
1072
1073 DO_ZPZ(sve_uxtb_h, uint16_t, H1_2, DO_UXTB)
1074 DO_ZPZ(sve_uxtb_s, uint32_t, H1_4, DO_UXTB)
1075 DO_ZPZ(sve_uxth_s, uint32_t, H1_4, DO_UXTH)
1076 DO_ZPZ_D(sve_uxtb_d, uint64_t, DO_UXTB)
1077 DO_ZPZ_D(sve_uxth_d, uint64_t, DO_UXTH)
1078 DO_ZPZ_D(sve_uxtw_d, uint64_t, DO_UXTS)
1079
1080 #define DO_ABS(N) (N < 0 ? -N : N)
1081
1082 DO_ZPZ(sve_abs_b, int8_t, H1, DO_ABS)
1083 DO_ZPZ(sve_abs_h, int16_t, H1_2, DO_ABS)
1084 DO_ZPZ(sve_abs_s, int32_t, H1_4, DO_ABS)
1085 DO_ZPZ_D(sve_abs_d, int64_t, DO_ABS)
1086
1087 #define DO_NEG(N) (-N)
1088
1089 DO_ZPZ(sve_neg_b, uint8_t, H1, DO_NEG)
1090 DO_ZPZ(sve_neg_h, uint16_t, H1_2, DO_NEG)
1091 DO_ZPZ(sve_neg_s, uint32_t, H1_4, DO_NEG)
1092 DO_ZPZ_D(sve_neg_d, uint64_t, DO_NEG)
1093
1094 DO_ZPZ(sve_revb_h, uint16_t, H1_2, bswap16)
1095 DO_ZPZ(sve_revb_s, uint32_t, H1_4, bswap32)
1096 DO_ZPZ_D(sve_revb_d, uint64_t, bswap64)
1097
1098 DO_ZPZ(sve_revh_s, uint32_t, H1_4, hswap32)
1099 DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)
1100
1101 DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)
1102
1103 DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
1104 DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
1105 DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
1106 DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64)
1107
1108 #define DO_SQABS(X) \
1109 ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
1110 x_ >= 0 ? x_ : x_ == min_ ? -min_ - 1 : -x_; })
1111
1112 DO_ZPZ(sve2_sqabs_b, int8_t, H1, DO_SQABS)
1113 DO_ZPZ(sve2_sqabs_h, int16_t, H1_2, DO_SQABS)
1114 DO_ZPZ(sve2_sqabs_s, int32_t, H1_4, DO_SQABS)
1115 DO_ZPZ_D(sve2_sqabs_d, int64_t, DO_SQABS)
1116
1117 #define DO_SQNEG(X) \
1118 ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
1119 x_ == min_ ? -min_ - 1 : -x_; })
1120
1121 DO_ZPZ(sve2_sqneg_b, uint8_t, H1, DO_SQNEG)
1122 DO_ZPZ(sve2_sqneg_h, uint16_t, H1_2, DO_SQNEG)
1123 DO_ZPZ(sve2_sqneg_s, uint32_t, H1_4, DO_SQNEG)
1124 DO_ZPZ_D(sve2_sqneg_d, uint64_t, DO_SQNEG)
1125
1126 DO_ZPZ(sve2_urecpe_s, uint32_t, H1_4, helper_recpe_u32)
1127 DO_ZPZ(sve2_ursqrte_s, uint32_t, H1_4, helper_rsqrte_u32)
1128
1129 /* Three-operand expander, unpredicated, in which the third operand is "wide".
1130 */
1131 #define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \
1132 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1133 { \
1134 intptr_t i, opr_sz = simd_oprsz(desc); \
1135 for (i = 0; i < opr_sz; ) { \
1136 TYPEW mm = *(TYPEW *)(vm + i); \
1137 do { \
1138 TYPE nn = *(TYPE *)(vn + H(i)); \
1139 *(TYPE *)(vd + H(i)) = OP(nn, mm); \
1140 i += sizeof(TYPE); \
1141 } while (i & 7); \
1142 } \
1143 }
1144
1145 DO_ZZW(sve_asr_zzw_b, int8_t, uint64_t, H1, DO_ASR)
1146 DO_ZZW(sve_lsr_zzw_b, uint8_t, uint64_t, H1, DO_LSR)
1147 DO_ZZW(sve_lsl_zzw_b, uint8_t, uint64_t, H1, DO_LSL)
1148
1149 DO_ZZW(sve_asr_zzw_h, int16_t, uint64_t, H1_2, DO_ASR)
1150 DO_ZZW(sve_lsr_zzw_h, uint16_t, uint64_t, H1_2, DO_LSR)
1151 DO_ZZW(sve_lsl_zzw_h, uint16_t, uint64_t, H1_2, DO_LSL)
1152
1153 DO_ZZW(sve_asr_zzw_s, int32_t, uint64_t, H1_4, DO_ASR)
1154 DO_ZZW(sve_lsr_zzw_s, uint32_t, uint64_t, H1_4, DO_LSR)
1155 DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL)
1156
1157 #undef DO_ZZW
1158
1159 #undef DO_CLS_B
1160 #undef DO_CLS_H
1161 #undef DO_CLZ_B
1162 #undef DO_CLZ_H
1163 #undef DO_CNOT
1164 #undef DO_FABS
1165 #undef DO_FNEG
1166 #undef DO_ABS
1167 #undef DO_NEG
1168 #undef DO_ZPZ
1169 #undef DO_ZPZ_D
1170
1171 /*
1172 * Three-operand expander, unpredicated, in which the two inputs are
1173 * selected from the top or bottom half of the wide column.
1174 */
1175 #define DO_ZZZ_TB(NAME, TYPEW, TYPEN, HW, HN, OP) \
1176 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1177 { \
1178 intptr_t i, opr_sz = simd_oprsz(desc); \
1179 int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
1180 int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
1181 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
1182 TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
1183 TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
1184 *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
1185 } \
1186 }
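
/*
 * sel1 and sel2 are either 0 or sizeof(TYPEN), selecting the
 * even-numbered ("bottom") or odd-numbered ("top") narrow elements of
 * each source.  For example, sve2_saddl_h with both selectors 0 widens
 * and adds the even-numbered bytes of VN and VM into each halfword
 * of VD.
 */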
1187
1188 DO_ZZZ_TB(sve2_saddl_h, int16_t, int8_t, H1_2, H1, DO_ADD)
1189 DO_ZZZ_TB(sve2_saddl_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
1190 DO_ZZZ_TB(sve2_saddl_d, int64_t, int32_t, , H1_4, DO_ADD)
1191
1192 DO_ZZZ_TB(sve2_ssubl_h, int16_t, int8_t, H1_2, H1, DO_SUB)
1193 DO_ZZZ_TB(sve2_ssubl_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
1194 DO_ZZZ_TB(sve2_ssubl_d, int64_t, int32_t, , H1_4, DO_SUB)
1195
1196 DO_ZZZ_TB(sve2_sabdl_h, int16_t, int8_t, H1_2, H1, DO_ABD)
1197 DO_ZZZ_TB(sve2_sabdl_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
1198 DO_ZZZ_TB(sve2_sabdl_d, int64_t, int32_t, , H1_4, DO_ABD)
1199
1200 DO_ZZZ_TB(sve2_uaddl_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
1201 DO_ZZZ_TB(sve2_uaddl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
1202 DO_ZZZ_TB(sve2_uaddl_d, uint64_t, uint32_t, , H1_4, DO_ADD)
1203
1204 DO_ZZZ_TB(sve2_usubl_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
1205 DO_ZZZ_TB(sve2_usubl_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
1206 DO_ZZZ_TB(sve2_usubl_d, uint64_t, uint32_t, , H1_4, DO_SUB)
1207
1208 DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
1209 DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
1210 DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD)
1211
1212 DO_ZZZ_TB(sve2_smull_zzz_h, int16_t, int8_t, H1_2, H1, DO_MUL)
1213 DO_ZZZ_TB(sve2_smull_zzz_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
1214 DO_ZZZ_TB(sve2_smull_zzz_d, int64_t, int32_t, , H1_4, DO_MUL)
1215
1216 DO_ZZZ_TB(sve2_umull_zzz_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
1217 DO_ZZZ_TB(sve2_umull_zzz_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
1218 DO_ZZZ_TB(sve2_umull_zzz_d, uint64_t, uint32_t, , H1_4, DO_MUL)
1219
1220 /* Note that the multiply cannot overflow, but the doubling can. */
1221 static inline int16_t do_sqdmull_h(int16_t n, int16_t m)
1222 {
1223 int16_t val = n * m;
1224 return DO_SQADD_H(val, val);
1225 }
1226
1227 static inline int32_t do_sqdmull_s(int32_t n, int32_t m)
1228 {
1229 int32_t val = n * m;
1230 return DO_SQADD_S(val, val);
1231 }
1232
1233 static inline int64_t do_sqdmull_d(int64_t n, int64_t m)
1234 {
1235 int64_t val = n * m;
1236 return do_sqadd_d(val, val);
1237 }
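
/*
 * For example, in do_sqdmull_h the widened product of two int8_t
 * inputs is at most 128 * 128 == 16384 in magnitude, which fits in
 * int16_t, but doubling -128 * -128 would give 32768 and therefore
 * saturates to INT16_MAX.
 */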
1238
1239 DO_ZZZ_TB(sve2_sqdmull_zzz_h, int16_t, int8_t, H1_2, H1, do_sqdmull_h)
1240 DO_ZZZ_TB(sve2_sqdmull_zzz_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
1241 DO_ZZZ_TB(sve2_sqdmull_zzz_d, int64_t, int32_t, , H1_4, do_sqdmull_d)
1242
1243 #undef DO_ZZZ_TB
1244
1245 #define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \
1246 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1247 { \
1248 intptr_t i, opr_sz = simd_oprsz(desc); \
1249 int sel2 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
1250 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
1251 TYPEW nn = *(TYPEW *)(vn + HW(i)); \
1252 TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
1253 *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
1254 } \
1255 }
1256
1257 DO_ZZZ_WTB(sve2_saddw_h, int16_t, int8_t, H1_2, H1, DO_ADD)
1258 DO_ZZZ_WTB(sve2_saddw_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
1259 DO_ZZZ_WTB(sve2_saddw_d, int64_t, int32_t, , H1_4, DO_ADD)
1260
1261 DO_ZZZ_WTB(sve2_ssubw_h, int16_t, int8_t, H1_2, H1, DO_SUB)
1262 DO_ZZZ_WTB(sve2_ssubw_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
1263 DO_ZZZ_WTB(sve2_ssubw_d, int64_t, int32_t, , H1_4, DO_SUB)
1264
1265 DO_ZZZ_WTB(sve2_uaddw_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
1266 DO_ZZZ_WTB(sve2_uaddw_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
1267 DO_ZZZ_WTB(sve2_uaddw_d, uint64_t, uint32_t, , H1_4, DO_ADD)
1268
1269 DO_ZZZ_WTB(sve2_usubw_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
1270 DO_ZZZ_WTB(sve2_usubw_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
1271 DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB)
1272
1273 #undef DO_ZZZ_WTB
1274
1275 #define DO_ZZZ_NTB(NAME, TYPE, H, OP) \
1276 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1277 { \
1278 intptr_t i, opr_sz = simd_oprsz(desc); \
1279 intptr_t sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPE); \
1280 intptr_t sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPE); \
1281 for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
1282 TYPE nn = *(TYPE *)(vn + H(i + sel1)); \
1283 TYPE mm = *(TYPE *)(vm + H(i + sel2)); \
1284 *(TYPE *)(vd + H(i + sel1)) = OP(nn, mm); \
1285 } \
1286 }
1287
1288 DO_ZZZ_NTB(sve2_eoril_b, uint8_t, H1, DO_EOR)
1289 DO_ZZZ_NTB(sve2_eoril_h, uint16_t, H1_2, DO_EOR)
1290 DO_ZZZ_NTB(sve2_eoril_s, uint32_t, H1_4, DO_EOR)
1291 DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR)
1292
1293 #undef DO_ZZZ_NTB
1294
1295 #define DO_ZZZW_ACC(NAME, TYPEW, TYPEN, HW, HN, OP) \
1296 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1297 { \
1298 intptr_t i, opr_sz = simd_oprsz(desc); \
1299 intptr_t sel1 = simd_data(desc) * sizeof(TYPEN); \
1300 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
1301 TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
1302 TYPEW mm = *(TYPEN *)(vm + HN(i + sel1)); \
1303 TYPEW aa = *(TYPEW *)(va + HW(i)); \
1304 *(TYPEW *)(vd + HW(i)) = OP(nn, mm) + aa; \
1305 } \
1306 }
1307
1308 DO_ZZZW_ACC(sve2_sabal_h, int16_t, int8_t, H1_2, H1, DO_ABD)
1309 DO_ZZZW_ACC(sve2_sabal_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
1310 DO_ZZZW_ACC(sve2_sabal_d, int64_t, int32_t, , H1_4, DO_ABD)
1311
1312 DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
1313 DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
1314 DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD)
1315
1316 DO_ZZZW_ACC(sve2_smlal_zzzw_h, int16_t, int8_t, H1_2, H1, DO_MUL)
1317 DO_ZZZW_ACC(sve2_smlal_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
1318 DO_ZZZW_ACC(sve2_smlal_zzzw_d, int64_t, int32_t, , H1_4, DO_MUL)
1319
1320 DO_ZZZW_ACC(sve2_umlal_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
1321 DO_ZZZW_ACC(sve2_umlal_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
1322 DO_ZZZW_ACC(sve2_umlal_zzzw_d, uint64_t, uint32_t, , H1_4, DO_MUL)
1323
1324 #define DO_NMUL(N, M) -(N * M)
1325
1326 DO_ZZZW_ACC(sve2_smlsl_zzzw_h, int16_t, int8_t, H1_2, H1, DO_NMUL)
1327 DO_ZZZW_ACC(sve2_smlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_NMUL)
1328 DO_ZZZW_ACC(sve2_smlsl_zzzw_d, int64_t, int32_t, , H1_4, DO_NMUL)
1329
1330 DO_ZZZW_ACC(sve2_umlsl_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_NMUL)
1331 DO_ZZZW_ACC(sve2_umlsl_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_NMUL)
1332 DO_ZZZW_ACC(sve2_umlsl_zzzw_d, uint64_t, uint32_t, , H1_4, DO_NMUL)
1333
1334 #undef DO_ZZZW_ACC
1335
1336 #define DO_XTNB(NAME, TYPE, OP) \
1337 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1338 { \
1339 intptr_t i, opr_sz = simd_oprsz(desc); \
1340 for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
1341 TYPE nn = *(TYPE *)(vn + i); \
1342 nn = OP(nn) & MAKE_64BIT_MASK(0, sizeof(TYPE) * 4); \
1343 *(TYPE *)(vd + i) = nn; \
1344 } \
1345 }
1346
1347 #define DO_XTNT(NAME, TYPE, TYPEN, H, OP) \
1348 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1349 { \
1350 intptr_t i, opr_sz = simd_oprsz(desc), odd = H(sizeof(TYPEN)); \
1351 for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
1352 TYPE nn = *(TYPE *)(vn + i); \
1353 *(TYPEN *)(vd + i + odd) = OP(nn); \
1354 } \
1355 }
1356
1357 #define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX)
1358 #define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX)
1359 #define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX)
1360
1361 DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H)
1362 DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S)
1363 DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D)
1364
1365 DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H)
1366 DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S)
1367 DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D)
1368
1369 #define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX)
1370 #define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX)
1371 #define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX)
1372
1373 DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H)
1374 DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S)
1375 DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D)
1376
1377 DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H)
1378 DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S)
1379 DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D)
1380
1381 DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H)
1382 DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S)
1383 DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D)
1384
1385 DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H)
1386 DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S)
1387 DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D)
1388
1389 #undef DO_XTNB
1390 #undef DO_XTNT
1391
1392 void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
1393 {
1394 intptr_t i, opr_sz = simd_oprsz(desc);
1395 int sel = H4(extract32(desc, SIMD_DATA_SHIFT, 1));
1396 uint32_t inv = -extract32(desc, SIMD_DATA_SHIFT + 1, 1);
1397 uint32_t *a = va, *n = vn;
1398 uint64_t *d = vd, *m = vm;
1399
1400 for (i = 0; i < opr_sz / 8; ++i) {
1401 uint32_t e1 = a[2 * i + H4(0)];
1402 uint32_t e2 = n[2 * i + sel] ^ inv;
1403 uint64_t c = extract64(m[i], 32, 1);
1404 /* Compute and store the entire 33-bit result at once. */
1405 d[i] = c + e1 + e2;
1406 }
1407 }
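
/*
 * The 32-bit addend, the selected (and possibly inverted) element and
 * the carry-in sum to at most 2^33 - 1, so the single 64-bit store
 * leaves the 32-bit result in the even element and the carry-out in
 * bit 0 of the odd element, where a subsequent carry-long operation
 * reads it back as bit 32 of the pair.
 */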
1408
1409 void HELPER(sve2_adcl_d)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
1410 {
1411 intptr_t i, opr_sz = simd_oprsz(desc);
1412 int sel = extract32(desc, SIMD_DATA_SHIFT, 1);
1413 uint64_t inv = -(uint64_t)extract32(desc, SIMD_DATA_SHIFT + 1, 1);
1414 uint64_t *d = vd, *a = va, *n = vn, *m = vm;
1415
1416 for (i = 0; i < opr_sz / 8; i += 2) {
1417 Int128 e1 = int128_make64(a[i]);
1418 Int128 e2 = int128_make64(n[i + sel] ^ inv);
1419 Int128 c = int128_make64(m[i + 1] & 1);
1420 Int128 r = int128_add(int128_add(e1, e2), c);
1421 d[i + 0] = int128_getlo(r);
1422 d[i + 1] = int128_gethi(r);
1423 }
1424 }
1425
1426 #define DO_SQDMLAL(NAME, TYPEW, TYPEN, HW, HN, DMUL_OP, SUM_OP) \
1427 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1428 { \
1429 intptr_t i, opr_sz = simd_oprsz(desc); \
1430 int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
1431 int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
1432 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
1433 TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
1434 TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
1435 TYPEW aa = *(TYPEW *)(va + HW(i)); \
1436 *(TYPEW *)(vd + HW(i)) = SUM_OP(aa, DMUL_OP(nn, mm)); \
1437 } \
1438 }
1439
1440 DO_SQDMLAL(sve2_sqdmlal_zzzw_h, int16_t, int8_t, H1_2, H1,
1441 do_sqdmull_h, DO_SQADD_H)
1442 DO_SQDMLAL(sve2_sqdmlal_zzzw_s, int32_t, int16_t, H1_4, H1_2,
1443 do_sqdmull_s, DO_SQADD_S)
1444 DO_SQDMLAL(sve2_sqdmlal_zzzw_d, int64_t, int32_t, , H1_4,
1445 do_sqdmull_d, do_sqadd_d)
1446
1447 DO_SQDMLAL(sve2_sqdmlsl_zzzw_h, int16_t, int8_t, H1_2, H1,
1448 do_sqdmull_h, DO_SQSUB_H)
1449 DO_SQDMLAL(sve2_sqdmlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2,
1450 do_sqdmull_s, DO_SQSUB_S)
1451 DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t, , H1_4,
1452 do_sqdmull_d, do_sqsub_d)
1453
1454 #undef DO_SQDMLAL
1455
1456 #define DO_CMLA_FUNC(NAME, TYPE, H, OP) \
1457 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1458 { \
1459 intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \
1460 int rot = simd_data(desc); \
1461 int sel_a = rot & 1, sel_b = sel_a ^ 1; \
1462 bool sub_r = rot == 1 || rot == 2; \
1463 bool sub_i = rot >= 2; \
1464 TYPE *d = vd, *n = vn, *m = vm, *a = va; \
1465 for (i = 0; i < opr_sz; i += 2) { \
1466 TYPE elt1_a = n[H(i + sel_a)]; \
1467 TYPE elt2_a = m[H(i + sel_a)]; \
1468 TYPE elt2_b = m[H(i + sel_b)]; \
1469 d[H(i)] = OP(elt1_a, elt2_a, a[H(i)], sub_r); \
1470 d[H(i + 1)] = OP(elt1_a, elt2_b, a[H(i + 1)], sub_i); \
1471 } \
1472 }
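
/*
 * rot encodes the rotation in multiples of 90 degrees: sel_a selects
 * the real (rot 0/180) or imaginary (rot 90/270) input elements,
 * sub_r negates the contribution to the real result element for
 * rot 90/180, and sub_i negates the contribution to the imaginary
 * result element for rot 180/270.
 */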
1473
1474 #define DO_CMLA(N, M, A, S) (A + (N * M) * (S ? -1 : 1))
1475
1476 DO_CMLA_FUNC(sve2_cmla_zzzz_b, uint8_t, H1, DO_CMLA)
1477 DO_CMLA_FUNC(sve2_cmla_zzzz_h, uint16_t, H2, DO_CMLA)
1478 DO_CMLA_FUNC(sve2_cmla_zzzz_s, uint32_t, H4, DO_CMLA)
1479 DO_CMLA_FUNC(sve2_cmla_zzzz_d, uint64_t, , DO_CMLA)
1480
1481 #define DO_SQRDMLAH_B(N, M, A, S) \
1482 do_sqrdmlah_b(N, M, A, S, true)
1483 #define DO_SQRDMLAH_H(N, M, A, S) \
1484 ({ uint32_t discard; do_sqrdmlah_h(N, M, A, S, true, &discard); })
1485 #define DO_SQRDMLAH_S(N, M, A, S) \
1486 ({ uint32_t discard; do_sqrdmlah_s(N, M, A, S, true, &discard); })
1487 #define DO_SQRDMLAH_D(N, M, A, S) \
1488 do_sqrdmlah_d(N, M, A, S, true)
1489
1490 DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_b, int8_t, H1, DO_SQRDMLAH_B)
1491 DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H)
1492 DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S)
1493 DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, , DO_SQRDMLAH_D)
1494
1495 #define DO_CMLA_IDX_FUNC(NAME, TYPE, H, OP) \
1496 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1497 { \
1498 intptr_t i, j, oprsz = simd_oprsz(desc); \
1499 int rot = extract32(desc, SIMD_DATA_SHIFT, 2); \
1500 int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2) * 2; \
1501 int sel_a = rot & 1, sel_b = sel_a ^ 1; \
1502 bool sub_r = rot == 1 || rot == 2; \
1503 bool sub_i = rot >= 2; \
1504 TYPE *d = vd, *n = vn, *m = vm, *a = va; \
1505 for (i = 0; i < oprsz / sizeof(TYPE); i += 16 / sizeof(TYPE)) { \
1506 TYPE elt2_a = m[H(i + idx + sel_a)]; \
1507 TYPE elt2_b = m[H(i + idx + sel_b)]; \
1508 for (j = 0; j < 16 / sizeof(TYPE); j += 2) { \
1509 TYPE elt1_a = n[H(i + j + sel_a)]; \
1510 d[H2(i + j)] = OP(elt1_a, elt2_a, a[H(i + j)], sub_r); \
1511 d[H2(i + j + 1)] = OP(elt1_a, elt2_b, a[H(i + j + 1)], sub_i); \
1512 } \
1513 } \
1514 }
1515
1516 DO_CMLA_IDX_FUNC(sve2_cmla_idx_h, int16_t, H2, DO_CMLA)
1517 DO_CMLA_IDX_FUNC(sve2_cmla_idx_s, int32_t, H4, DO_CMLA)
1518
1519 DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
1520 DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
1521
1522 #undef DO_CMLA
1523 #undef DO_CMLA_FUNC
1524 #undef DO_CMLA_IDX_FUNC
1525 #undef DO_SQRDMLAH_B
1526 #undef DO_SQRDMLAH_H
1527 #undef DO_SQRDMLAH_S
1528 #undef DO_SQRDMLAH_D
1529
1530 /* Note N and M are 4 elements bundled into one unit. */
1531 static int32_t do_cdot_s(uint32_t n, uint32_t m, int32_t a,
1532 int sel_a, int sel_b, int sub_i)
1533 {
1534 for (int i = 0; i <= 1; i++) {
1535 int32_t elt1_r = (int8_t)(n >> (16 * i));
1536 int32_t elt1_i = (int8_t)(n >> (16 * i + 8));
1537 int32_t elt2_a = (int8_t)(m >> (16 * i + 8 * sel_a));
1538 int32_t elt2_b = (int8_t)(m >> (16 * i + 8 * sel_b));
1539
1540 a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i;
1541 }
1542 return a;
1543 }
1544
1545 static int64_t do_cdot_d(uint64_t n, uint64_t m, int64_t a,
1546 int sel_a, int sel_b, int sub_i)
1547 {
1548 for (int i = 0; i <= 1; i++) {
1549 int64_t elt1_r = (int16_t)(n >> (32 * i + 0));
1550 int64_t elt1_i = (int16_t)(n >> (32 * i + 16));
1551 int64_t elt2_a = (int16_t)(m >> (32 * i + 16 * sel_a));
1552 int64_t elt2_b = (int16_t)(m >> (32 * i + 16 * sel_b));
1553
1554 a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i;
1555 }
1556 return a;
1557 }
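/*
 * Worked example: each 32-bit (resp. 64-bit) unit of N and M holds two
 * (real, imag) pairs of narrow elements.  With rot == 0 in the callers
 * below (sel_a == 0, sub_i == -1), every pair contributes
 *     n.re * m.re - n.im * m.im
 * to the accumulator, while rot == 1 selects the swapped M elements and
 * contributes n.re * m.im + n.im * m.re instead.
 */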
1558
1559 void HELPER(sve2_cdot_zzzz_s)(void *vd, void *vn, void *vm,
1560 void *va, uint32_t desc)
1561 {
1562 int opr_sz = simd_oprsz(desc);
1563 int rot = simd_data(desc);
1564 int sel_a = rot & 1;
1565 int sel_b = sel_a ^ 1;
1566 int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
1567 uint32_t *d = vd, *n = vn, *m = vm, *a = va;
1568
1569 for (int e = 0; e < opr_sz / 4; e++) {
1570 d[e] = do_cdot_s(n[e], m[e], a[e], sel_a, sel_b, sub_i);
1571 }
1572 }
1573
1574 void HELPER(sve2_cdot_zzzz_d)(void *vd, void *vn, void *vm,
1575 void *va, uint32_t desc)
1576 {
1577 int opr_sz = simd_oprsz(desc);
1578 int rot = simd_data(desc);
1579 int sel_a = rot & 1;
1580 int sel_b = sel_a ^ 1;
1581 int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
1582 uint64_t *d = vd, *n = vn, *m = vm, *a = va;
1583
1584 for (int e = 0; e < opr_sz / 8; e++) {
1585 d[e] = do_cdot_d(n[e], m[e], a[e], sel_a, sel_b, sub_i);
1586 }
1587 }
1588
1589 void HELPER(sve2_cdot_idx_s)(void *vd, void *vn, void *vm,
1590 void *va, uint32_t desc)
1591 {
1592 int opr_sz = simd_oprsz(desc);
1593 int rot = extract32(desc, SIMD_DATA_SHIFT, 2);
1594 int idx = H4(extract32(desc, SIMD_DATA_SHIFT + 2, 2));
1595 int sel_a = rot & 1;
1596 int sel_b = sel_a ^ 1;
1597 int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
1598 uint32_t *d = vd, *n = vn, *m = vm, *a = va;
1599
1600 for (int seg = 0; seg < opr_sz / 4; seg += 4) {
1601 uint32_t seg_m = m[seg + idx];
1602 for (int e = 0; e < 4; e++) {
1603 d[seg + e] = do_cdot_s(n[seg + e], seg_m, a[seg + e],
1604 sel_a, sel_b, sub_i);
1605 }
1606 }
1607 }
1608
1609 void HELPER(sve2_cdot_idx_d)(void *vd, void *vn, void *vm,
1610 void *va, uint32_t desc)
1611 {
1612 int seg, opr_sz = simd_oprsz(desc);
1613 int rot = extract32(desc, SIMD_DATA_SHIFT, 2);
1614 int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
1615 int sel_a = rot & 1;
1616 int sel_b = sel_a ^ 1;
1617 int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
1618 uint64_t *d = vd, *n = vn, *m = vm, *a = va;
1619
1620 for (seg = 0; seg < opr_sz / 8; seg += 2) {
1621 uint64_t seg_m = m[seg + idx];
1622 for (int e = 0; e < 2; e++) {
1623 d[seg + e] = do_cdot_d(n[seg + e], seg_m, a[seg + e],
1624 sel_a, sel_b, sub_i);
1625 }
1626 }
1627 }
1628
1629 #define DO_ZZXZ(NAME, TYPE, H, OP) \
1630 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1631 { \
1632 intptr_t oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
1633 intptr_t i, j, idx = simd_data(desc); \
1634 TYPE *d = vd, *a = va, *n = vn, *m = (TYPE *)vm + H(idx); \
1635 for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
1636 TYPE mm = m[i]; \
1637 for (j = 0; j < segment; j++) { \
1638 d[i + j] = OP(n[i + j], mm, a[i + j]); \
1639 } \
1640 } \
1641 }
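/*
 * Note that the indexed element MM is loaded once per 16-byte segment
 * and then applied to every element within that segment, matching the
 * 128-bit granule over which the index of these operations is defined.
 */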
1642
1643 #define DO_SQRDMLAH_H(N, M, A) \
1644 ({ uint32_t discard; do_sqrdmlah_h(N, M, A, false, true, &discard); })
1645 #define DO_SQRDMLAH_S(N, M, A) \
1646 ({ uint32_t discard; do_sqrdmlah_s(N, M, A, false, true, &discard); })
1647 #define DO_SQRDMLAH_D(N, M, A) do_sqrdmlah_d(N, M, A, false, true)
1648
1649 DO_ZZXZ(sve2_sqrdmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
1650 DO_ZZXZ(sve2_sqrdmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
1651 DO_ZZXZ(sve2_sqrdmlah_idx_d, int64_t, , DO_SQRDMLAH_D)
1652
1653 #define DO_SQRDMLSH_H(N, M, A) \
1654 ({ uint32_t discard; do_sqrdmlah_h(N, M, A, true, true, &discard); })
1655 #define DO_SQRDMLSH_S(N, M, A) \
1656 ({ uint32_t discard; do_sqrdmlah_s(N, M, A, true, true, &discard); })
1657 #define DO_SQRDMLSH_D(N, M, A) do_sqrdmlah_d(N, M, A, true, true)
1658
1659 DO_ZZXZ(sve2_sqrdmlsh_idx_h, int16_t, H2, DO_SQRDMLSH_H)
1660 DO_ZZXZ(sve2_sqrdmlsh_idx_s, int32_t, H4, DO_SQRDMLSH_S)
1661 DO_ZZXZ(sve2_sqrdmlsh_idx_d, int64_t, , DO_SQRDMLSH_D)
1662
1663 #undef DO_ZZXZ
1664
1665 #define DO_ZZXW(NAME, TYPEW, TYPEN, HW, HN, OP) \
1666 void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
1667 { \
1668 intptr_t i, j, oprsz = simd_oprsz(desc); \
1669 intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
1670 intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
1671 for (i = 0; i < oprsz; i += 16) { \
1672 TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \
1673 for (j = 0; j < 16; j += sizeof(TYPEW)) { \
1674 TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
1675 TYPEW aa = *(TYPEW *)(va + HW(i + j)); \
1676 *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm, aa); \
1677 } \
1678 } \
1679 }
1680
1681 #define DO_MLA(N, M, A) (A + N * M)
1682
1683 DO_ZZXW(sve2_smlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLA)
1684 DO_ZZXW(sve2_smlal_idx_d, int64_t, int32_t, , H1_4, DO_MLA)
1685 DO_ZZXW(sve2_umlal_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLA)
1686 DO_ZZXW(sve2_umlal_idx_d, uint64_t, uint32_t, , H1_4, DO_MLA)
1687
1688 #define DO_MLS(N, M, A) (A - N * M)
1689
1690 DO_ZZXW(sve2_smlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLS)
1691 DO_ZZXW(sve2_smlsl_idx_d, int64_t, int32_t, , H1_4, DO_MLS)
1692 DO_ZZXW(sve2_umlsl_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLS)
1693 DO_ZZXW(sve2_umlsl_idx_d, uint64_t, uint32_t, , H1_4, DO_MLS)
1694
1695 #define DO_SQDMLAL_S(N, M, A) DO_SQADD_S(A, do_sqdmull_s(N, M))
1696 #define DO_SQDMLAL_D(N, M, A) do_sqadd_d(A, do_sqdmull_d(N, M))
1697
1698 DO_ZZXW(sve2_sqdmlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLAL_S)
1699 DO_ZZXW(sve2_sqdmlal_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLAL_D)
1700
1701 #define DO_SQDMLSL_S(N, M, A) DO_SQSUB_S(A, do_sqdmull_s(N, M))
1702 #define DO_SQDMLSL_D(N, M, A) do_sqsub_d(A, do_sqdmull_d(N, M))
1703
1704 DO_ZZXW(sve2_sqdmlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLSL_S)
1705 DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLSL_D)
1706
1707 #undef DO_MLA
1708 #undef DO_MLS
1709 #undef DO_ZZXW
1710
1711 #define DO_ZZX(NAME, TYPEW, TYPEN, HW, HN, OP) \
1712 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1713 { \
1714 intptr_t i, j, oprsz = simd_oprsz(desc); \
1715 intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
1716 intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
1717 for (i = 0; i < oprsz; i += 16) { \
1718 TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \
1719 for (j = 0; j < 16; j += sizeof(TYPEW)) { \
1720 TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
1721 *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm); \
1722 } \
1723 } \
1724 }
1725
1726 DO_ZZX(sve2_sqdmull_idx_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
1727 DO_ZZX(sve2_sqdmull_idx_d, int64_t, int32_t, , H1_4, do_sqdmull_d)
1728
1729 DO_ZZX(sve2_smull_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
1730 DO_ZZX(sve2_smull_idx_d, int64_t, int32_t, , H1_4, DO_MUL)
1731
1732 DO_ZZX(sve2_umull_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
1733 DO_ZZX(sve2_umull_idx_d, uint64_t, uint32_t, , H1_4, DO_MUL)
1734
1735 #undef DO_ZZX
1736
1737 #define DO_BITPERM(NAME, TYPE, OP) \
1738 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1739 { \
1740 intptr_t i, opr_sz = simd_oprsz(desc); \
1741 for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
1742 TYPE nn = *(TYPE *)(vn + i); \
1743 TYPE mm = *(TYPE *)(vm + i); \
1744 *(TYPE *)(vd + i) = OP(nn, mm, sizeof(TYPE) * 8); \
1745 } \
1746 }
1747
1748 static uint64_t bitextract(uint64_t data, uint64_t mask, int n)
1749 {
1750 uint64_t res = 0;
1751 int db, rb = 0;
1752
1753 for (db = 0; db < n; ++db) {
1754 if ((mask >> db) & 1) {
1755 res |= ((data >> db) & 1) << rb;
1756 ++rb;
1757 }
1758 }
1759 return res;
1760 }
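/*
 * Worked example: bitextract(0xb4, 0xf0, 8) gathers the data bits
 * selected by the mask (bits 7..4 of 0xb4, i.e. 1011) into the low
 * end of the result, giving 0x0b.
 */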
1761
1762 DO_BITPERM(sve2_bext_b, uint8_t, bitextract)
1763 DO_BITPERM(sve2_bext_h, uint16_t, bitextract)
1764 DO_BITPERM(sve2_bext_s, uint32_t, bitextract)
1765 DO_BITPERM(sve2_bext_d, uint64_t, bitextract)
1766
1767 static uint64_t bitdeposit(uint64_t data, uint64_t mask, int n)
1768 {
1769 uint64_t res = 0;
1770 int rb, db = 0;
1771
1772 for (rb = 0; rb < n; ++rb) {
1773 if ((mask >> rb) & 1) {
1774 res |= ((data >> db) & 1) << rb;
1775 ++db;
1776 }
1777 }
1778 return res;
1779 }
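/*
 * Worked example: bitdeposit(0x0b, 0xf0, 8) scatters the low data bits
 * 1011 into the bit positions selected by the mask, giving 0xb0.
 */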
1780
1781 DO_BITPERM(sve2_bdep_b, uint8_t, bitdeposit)
1782 DO_BITPERM(sve2_bdep_h, uint16_t, bitdeposit)
1783 DO_BITPERM(sve2_bdep_s, uint32_t, bitdeposit)
1784 DO_BITPERM(sve2_bdep_d, uint64_t, bitdeposit)
1785
1786 static uint64_t bitgroup(uint64_t data, uint64_t mask, int n)
1787 {
1788 uint64_t resm = 0, resu = 0;
1789 int db, rbm = 0, rbu = 0;
1790
1791 for (db = 0; db < n; ++db) {
1792 uint64_t val = (data >> db) & 1;
1793 if ((mask >> db) & 1) {
1794 resm |= val << rbm++;
1795 } else {
1796 resu |= val << rbu++;
1797 }
1798 }
1799
1800 return resm | (resu << rbm);
1801 }
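/*
 * Worked example: bitgroup(0xb4, 0xf0, 8) packs the mask-selected bits
 * (1011, from bits 7..4) at the bottom and stacks the remaining bits
 * (0100, from bits 3..0) above them, giving 0x4b.
 */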
1802
1803 DO_BITPERM(sve2_bgrp_b, uint8_t, bitgroup)
1804 DO_BITPERM(sve2_bgrp_h, uint16_t, bitgroup)
1805 DO_BITPERM(sve2_bgrp_s, uint32_t, bitgroup)
1806 DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup)
1807
1808 #undef DO_BITPERM
1809
1810 #define DO_CADD(NAME, TYPE, H, ADD_OP, SUB_OP) \
1811 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
1812 { \
1813 intptr_t i, opr_sz = simd_oprsz(desc); \
1814 int sub_r = simd_data(desc); \
1815 if (sub_r) { \
1816 for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
1817 TYPE acc_r = *(TYPE *)(vn + H(i)); \
1818 TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
1819 TYPE el2_r = *(TYPE *)(vm + H(i)); \
1820 TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
1821 acc_r = ADD_OP(acc_r, el2_i); \
1822 acc_i = SUB_OP(acc_i, el2_r); \
1823 *(TYPE *)(vd + H(i)) = acc_r; \
1824 *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
1825 } \
1826 } else { \
1827 for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
1828 TYPE acc_r = *(TYPE *)(vn + H(i)); \
1829 TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
1830 TYPE el2_r = *(TYPE *)(vm + H(i)); \
1831 TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
1832 acc_r = SUB_OP(acc_r, el2_i); \
1833 acc_i = ADD_OP(acc_i, el2_r); \
1834 *(TYPE *)(vd + H(i)) = acc_r; \
1835 *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
1836 } \
1837 } \
1838 }
1839
1840 DO_CADD(sve2_cadd_b, int8_t, H1, DO_ADD, DO_SUB)
1841 DO_CADD(sve2_cadd_h, int16_t, H1_2, DO_ADD, DO_SUB)
1842 DO_CADD(sve2_cadd_s, int32_t, H1_4, DO_ADD, DO_SUB)
1843 DO_CADD(sve2_cadd_d, int64_t, , DO_ADD, DO_SUB)
1844
1845 DO_CADD(sve2_sqcadd_b, int8_t, H1, DO_SQADD_B, DO_SQSUB_B)
1846 DO_CADD(sve2_sqcadd_h, int16_t, H1_2, DO_SQADD_H, DO_SQSUB_H)
1847 DO_CADD(sve2_sqcadd_s, int32_t, H1_4, DO_SQADD_S, DO_SQSUB_S)
1848 DO_CADD(sve2_sqcadd_d, int64_t, , do_sqadd_d, do_sqsub_d)
1849
1850 #undef DO_CADD
1851
1852 #define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
1853 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
1854 { \
1855 intptr_t i, opr_sz = simd_oprsz(desc); \
1856 intptr_t sel = (simd_data(desc) & 1) * sizeof(TYPEN); \
1857 int shift = simd_data(desc) >> 1; \
1858 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
1859 TYPEW nn = *(TYPEN *)(vn + HN(i + sel)); \
1860 *(TYPEW *)(vd + HW(i)) = nn << shift; \
1861 } \
1862 }
1863
1864 DO_ZZI_SHLL(sve2_sshll_h, int16_t, int8_t, H1_2, H1)
1865 DO_ZZI_SHLL(sve2_sshll_s, int32_t, int16_t, H1_4, H1_2)
1866 DO_ZZI_SHLL(sve2_sshll_d, int64_t, int32_t, , H1_4)
1867
1868 DO_ZZI_SHLL(sve2_ushll_h, uint16_t, uint8_t, H1_2, H1)
1869 DO_ZZI_SHLL(sve2_ushll_s, uint32_t, uint16_t, H1_4, H1_2)
1870 DO_ZZI_SHLL(sve2_ushll_d, uint64_t, uint32_t, , H1_4)
1871
1872 #undef DO_ZZI_SHLL
1873
1874 /* Two-operand reduction expander, controlled by a predicate.
1875 * The difference between TYPERED and TYPERET has to do with
1876 * sign-extension. E.g. for SMAX, TYPERED must be signed,
1877 * but TYPERET must be unsigned so that e.g. a 32-bit value
1878 * is not sign-extended to the ABI uint64_t return type.
1879 */
1880 /* ??? If we were to vectorize this by hand the reduction ordering
1881 * would change. For integer operands, this is perfectly fine.
1882 */
1883 #define DO_VPZ(NAME, TYPEELT, TYPERED, TYPERET, H, INIT, OP) \
1884 uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \
1885 { \
1886 intptr_t i, opr_sz = simd_oprsz(desc); \
1887 TYPERED ret = INIT; \
1888 for (i = 0; i < opr_sz; ) { \
1889 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
1890 do { \
1891 if (pg & 1) { \
1892 TYPEELT nn = *(TYPEELT *)(vn + H(i)); \
1893 ret = OP(ret, nn); \
1894 } \
1895 i += sizeof(TYPEELT), pg >>= sizeof(TYPEELT); \
1896 } while (i & 15); \
1897 } \
1898 return (TYPERET)ret; \
1899 }
1900
1901 #define DO_VPZ_D(NAME, TYPEE, TYPER, INIT, OP) \
1902 uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \
1903 { \
1904 intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
1905 TYPEE *n = vn; \
1906 uint8_t *pg = vg; \
1907 TYPER ret = INIT; \
1908 for (i = 0; i < opr_sz; i += 1) { \
1909 if (pg[H1(i)] & 1) { \
1910 TYPEE nn = n[i]; \
1911 ret = OP(ret, nn); \
1912 } \
1913 } \
1914 return ret; \
1915 }
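/*
 * For example, sve_smaxv_s below reduces with int32_t comparisons
 * (INIT == INT32_MIN) but returns through uint32_t, so a negative
 * maximum such as -1 is returned as 0x00000000ffffffff rather than
 * being sign-extended into the uint64_t return value.
 */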
1916
1917 DO_VPZ(sve_orv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_ORR)
1918 DO_VPZ(sve_orv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_ORR)
1919 DO_VPZ(sve_orv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_ORR)
1920 DO_VPZ_D(sve_orv_d, uint64_t, uint64_t, 0, DO_ORR)
1921
1922 DO_VPZ(sve_eorv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_EOR)
1923 DO_VPZ(sve_eorv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_EOR)
1924 DO_VPZ(sve_eorv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_EOR)
1925 DO_VPZ_D(sve_eorv_d, uint64_t, uint64_t, 0, DO_EOR)
1926
1927 DO_VPZ(sve_andv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_AND)
1928 DO_VPZ(sve_andv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_AND)
1929 DO_VPZ(sve_andv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_AND)
1930 DO_VPZ_D(sve_andv_d, uint64_t, uint64_t, -1, DO_AND)
1931
1932 DO_VPZ(sve_saddv_b, int8_t, uint64_t, uint64_t, H1, 0, DO_ADD)
1933 DO_VPZ(sve_saddv_h, int16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD)
1934 DO_VPZ(sve_saddv_s, int32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD)
1935
1936 DO_VPZ(sve_uaddv_b, uint8_t, uint64_t, uint64_t, H1, 0, DO_ADD)
1937 DO_VPZ(sve_uaddv_h, uint16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD)
1938 DO_VPZ(sve_uaddv_s, uint32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD)
1939 DO_VPZ_D(sve_uaddv_d, uint64_t, uint64_t, 0, DO_ADD)
1940
1941 DO_VPZ(sve_smaxv_b, int8_t, int8_t, uint8_t, H1, INT8_MIN, DO_MAX)
1942 DO_VPZ(sve_smaxv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MIN, DO_MAX)
1943 DO_VPZ(sve_smaxv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MIN, DO_MAX)
1944 DO_VPZ_D(sve_smaxv_d, int64_t, int64_t, INT64_MIN, DO_MAX)
1945
1946 DO_VPZ(sve_umaxv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_MAX)
1947 DO_VPZ(sve_umaxv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_MAX)
1948 DO_VPZ(sve_umaxv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_MAX)
1949 DO_VPZ_D(sve_umaxv_d, uint64_t, uint64_t, 0, DO_MAX)
1950
1951 DO_VPZ(sve_sminv_b, int8_t, int8_t, uint8_t, H1, INT8_MAX, DO_MIN)
1952 DO_VPZ(sve_sminv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MAX, DO_MIN)
1953 DO_VPZ(sve_sminv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MAX, DO_MIN)
1954 DO_VPZ_D(sve_sminv_d, int64_t, int64_t, INT64_MAX, DO_MIN)
1955
1956 DO_VPZ(sve_uminv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_MIN)
1957 DO_VPZ(sve_uminv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_MIN)
1958 DO_VPZ(sve_uminv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_MIN)
1959 DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN)
1960
1961 #undef DO_VPZ
1962 #undef DO_VPZ_D
1963
1964 /* Two vector operands, one scalar operand, unpredicated. */
1965 #define DO_ZZI(NAME, TYPE, OP) \
1966 void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc) \
1967 { \
1968 intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \
1969 TYPE s = s64, *d = vd, *n = vn; \
1970 for (i = 0; i < opr_sz; ++i) { \
1971 d[i] = OP(n[i], s); \
1972 } \
1973 }
1974
1975 #define DO_SUBR(X, Y) (Y - X)
1976
1977 DO_ZZI(sve_subri_b, uint8_t, DO_SUBR)
1978 DO_ZZI(sve_subri_h, uint16_t, DO_SUBR)
1979 DO_ZZI(sve_subri_s, uint32_t, DO_SUBR)
1980 DO_ZZI(sve_subri_d, uint64_t, DO_SUBR)
1981
1982 DO_ZZI(sve_smaxi_b, int8_t, DO_MAX)
1983 DO_ZZI(sve_smaxi_h, int16_t, DO_MAX)
1984 DO_ZZI(sve_smaxi_s, int32_t, DO_MAX)
1985 DO_ZZI(sve_smaxi_d, int64_t, DO_MAX)
1986
1987 DO_ZZI(sve_smini_b, int8_t, DO_MIN)
1988 DO_ZZI(sve_smini_h, int16_t, DO_MIN)
1989 DO_ZZI(sve_smini_s, int32_t, DO_MIN)
1990 DO_ZZI(sve_smini_d, int64_t, DO_MIN)
1991
1992 DO_ZZI(sve_umaxi_b, uint8_t, DO_MAX)
1993 DO_ZZI(sve_umaxi_h, uint16_t, DO_MAX)
1994 DO_ZZI(sve_umaxi_s, uint32_t, DO_MAX)
1995 DO_ZZI(sve_umaxi_d, uint64_t, DO_MAX)
1996
1997 DO_ZZI(sve_umini_b, uint8_t, DO_MIN)
1998 DO_ZZI(sve_umini_h, uint16_t, DO_MIN)
1999 DO_ZZI(sve_umini_s, uint32_t, DO_MIN)
2000 DO_ZZI(sve_umini_d, uint64_t, DO_MIN)
2001
2002 #undef DO_ZZI
2003
2004 #undef DO_AND
2005 #undef DO_ORR
2006 #undef DO_EOR
2007 #undef DO_BIC
2008 #undef DO_ADD
2009 #undef DO_SUB
2010 #undef DO_MAX
2011 #undef DO_MIN
2012 #undef DO_ABD
2013 #undef DO_MUL
2014 #undef DO_DIV
2015 #undef DO_ASR
2016 #undef DO_LSR
2017 #undef DO_LSL
2018 #undef DO_SUBR
2019
2020 /* Similar to the ARM LastActiveElement pseudocode function, except the
2021 result is multiplied by the element size. This includes the not found
2022 indication; e.g. not found for esz=3 is -8. */
2023 static intptr_t last_active_element(uint64_t *g, intptr_t words, intptr_t esz)
2024 {
2025 uint64_t mask = pred_esz_masks[esz];
2026 intptr_t i = words;
2027
2028 do {
2029 uint64_t this_g = g[--i] & mask;
2030 if (this_g) {
2031 return i * 64 + (63 - clz64(this_g));
2032 }
2033 } while (i > 0);
2034 return (intptr_t)-1 << esz;
2035 }
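/*
 * For example, with esz == 2 (every fourth predicate bit significant)
 * and g[0] == 0x11, elements 0 and 1 are active and the function
 * returns 4, the byte offset of element 1; with no bits set it
 * returns -4.
 */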
2036
2037 uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t pred_desc)
2038 {
2039 intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
2040 uint32_t flags = PREDTEST_INIT;
2041 uint64_t *d = vd, *g = vg;
2042 intptr_t i = 0;
2043
2044 do {
2045 uint64_t this_d = d[i];
2046 uint64_t this_g = g[i];
2047
2048 if (this_g) {
2049 if (!(flags & 4)) {
2050 /* Set in D the first bit of G. */
2051 this_d |= this_g & -this_g;
2052 d[i] = this_d;
2053 }
2054 flags = iter_predtest_fwd(this_d, this_g, flags);
2055 }
2056 } while (++i < words);
2057
2058 return flags;
2059 }
2060
2061 uint32_t HELPER(sve_pnext)(void *vd, void *vg, uint32_t pred_desc)
2062 {
2063 intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
2064 intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
2065 uint32_t flags = PREDTEST_INIT;
2066 uint64_t *d = vd, *g = vg, esz_mask;
2067 intptr_t i, next;
2068
2069 next = last_active_element(vd, words, esz) + (1 << esz);
2070 esz_mask = pred_esz_masks[esz];
2071
2072 /* Similar to the pseudocode for pnext, but scaled by ESZ
2073 so that we find the correct bit. */
2074 if (next < words * 64) {
2075 uint64_t mask = -1;
2076
2077 if (next & 63) {
2078 mask = ~((1ull << (next & 63)) - 1);
2079 next &= -64;
2080 }
2081 do {
2082 uint64_t this_g = g[next / 64] & esz_mask & mask;
2083 if (this_g != 0) {
2084 next = (next & -64) + ctz64(this_g);
2085 break;
2086 }
2087 next += 64;
2088 mask = -1;
2089 } while (next < words * 64);
2090 }
2091
2092 i = 0;
2093 do {
2094 uint64_t this_d = 0;
2095 if (i == next / 64) {
2096 this_d = 1ull << (next & 63);
2097 }
2098 d[i] = this_d;
2099 flags = iter_predtest_fwd(this_d, g[i] & esz_mask, flags);
2100 } while (++i < words);
2101
2102 return flags;
2103 }
2104
2105 /*
2106 * Copy Zn into Zd, and store zero into inactive elements.
2107 * If inv, store zeros into the active elements.
2108 */
2109 void HELPER(sve_movz_b)(void *vd, void *vn, void *vg, uint32_t desc)
2110 {
2111 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2112 uint64_t inv = -(uint64_t)(simd_data(desc) & 1);
2113 uint64_t *d = vd, *n = vn;
2114 uint8_t *pg = vg;
2115
2116 for (i = 0; i < opr_sz; i += 1) {
2117 d[i] = n[i] & (expand_pred_b(pg[H1(i)]) ^ inv);
2118 }
2119 }
2120
2121 void HELPER(sve_movz_h)(void *vd, void *vn, void *vg, uint32_t desc)
2122 {
2123 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2124 uint64_t inv = -(uint64_t)(simd_data(desc) & 1);
2125 uint64_t *d = vd, *n = vn;
2126 uint8_t *pg = vg;
2127
2128 for (i = 0; i < opr_sz; i += 1) {
2129 d[i] = n[i] & (expand_pred_h(pg[H1(i)]) ^ inv);
2130 }
2131 }
2132
2133 void HELPER(sve_movz_s)(void *vd, void *vn, void *vg, uint32_t desc)
2134 {
2135 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2136 uint64_t inv = -(uint64_t)(simd_data(desc) & 1);
2137 uint64_t *d = vd, *n = vn;
2138 uint8_t *pg = vg;
2139
2140 for (i = 0; i < opr_sz; i += 1) {
2141 d[i] = n[i] & (expand_pred_s(pg[H1(i)]) ^ inv);
2142 }
2143 }
2144
2145 void HELPER(sve_movz_d)(void *vd, void *vn, void *vg, uint32_t desc)
2146 {
2147 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2148 uint64_t *d = vd, *n = vn;
2149 uint8_t *pg = vg;
2150 uint8_t inv = simd_data(desc);
2151
2152 for (i = 0; i < opr_sz; i += 1) {
2153 d[i] = n[i] & -(uint64_t)((pg[H1(i)] ^ inv) & 1);
2154 }
2155 }
2156
2157 /* Three-operand expander, immediate operand, controlled by a predicate.
2158 */
2159 #define DO_ZPZI(NAME, TYPE, H, OP) \
2160 void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
2161 { \
2162 intptr_t i, opr_sz = simd_oprsz(desc); \
2163 TYPE imm = simd_data(desc); \
2164 for (i = 0; i < opr_sz; ) { \
2165 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
2166 do { \
2167 if (pg & 1) { \
2168 TYPE nn = *(TYPE *)(vn + H(i)); \
2169 *(TYPE *)(vd + H(i)) = OP(nn, imm); \
2170 } \
2171 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
2172 } while (i & 15); \
2173 } \
2174 }
2175
2176 /* Similarly, specialized for 64-bit operands. */
2177 #define DO_ZPZI_D(NAME, TYPE, OP) \
2178 void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
2179 { \
2180 intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
2181 TYPE *d = vd, *n = vn; \
2182 TYPE imm = simd_data(desc); \
2183 uint8_t *pg = vg; \
2184 for (i = 0; i < opr_sz; i += 1) { \
2185 if (pg[H1(i)] & 1) { \
2186 TYPE nn = n[i]; \
2187 d[i] = OP(nn, imm); \
2188 } \
2189 } \
2190 }
2191
2192 #define DO_SHR(N, M) (N >> M)
2193 #define DO_SHL(N, M) (N << M)
2194
2195 /* Arithmetic shift right for division. This rounds negative numbers
2196 toward zero as per signed division. Therefore before shifting,
2197 when N is negative, add 2**M-1. */
2198 #define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M)
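/* Worked example: DO_ASRD(-7, 2) == (-7 + 3) >> 2 == -1, matching C
   division -7 / 4, where a plain arithmetic shift would yield -2. */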
2199
2200 static inline uint64_t do_urshr(uint64_t x, unsigned sh)
2201 {
2202 if (likely(sh < 64)) {
2203 return (x >> sh) + ((x >> (sh - 1)) & 1);
2204 } else if (sh == 64) {
2205 return x >> 63;
2206 } else {
2207 return 0;
2208 }
2209 }
2210
2211 static inline int64_t do_srshr(int64_t x, unsigned sh)
2212 {
2213 if (likely(sh < 64)) {
2214 return (x >> sh) + ((x >> (sh - 1)) & 1);
2215 } else {
2216 /* Rounding the sign bit always produces 0. */
2217 return 0;
2218 }
2219 }
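/*
 * Worked examples: do_urshr(7, 2) == (7 >> 2) + 1 == 2, i.e. 7/4
 * rounded to nearest; do_srshr(-5, 1) == -3 + 1 == -2, i.e. -2.5
 * rounded with the halfway case going toward positive infinity.
 */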
2220
2221 DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
2222 DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
2223 DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
2224 DO_ZPZI_D(sve_asr_zpzi_d, int64_t, DO_SHR)
2225
2226 DO_ZPZI(sve_lsr_zpzi_b, uint8_t, H1, DO_SHR)
2227 DO_ZPZI(sve_lsr_zpzi_h, uint16_t, H1_2, DO_SHR)
2228 DO_ZPZI(sve_lsr_zpzi_s, uint32_t, H1_4, DO_SHR)
2229 DO_ZPZI_D(sve_lsr_zpzi_d, uint64_t, DO_SHR)
2230
2231 DO_ZPZI(sve_lsl_zpzi_b, uint8_t, H1, DO_SHL)
2232 DO_ZPZI(sve_lsl_zpzi_h, uint16_t, H1_2, DO_SHL)
2233 DO_ZPZI(sve_lsl_zpzi_s, uint32_t, H1_4, DO_SHL)
2234 DO_ZPZI_D(sve_lsl_zpzi_d, uint64_t, DO_SHL)
2235
2236 DO_ZPZI(sve_asrd_b, int8_t, H1, DO_ASRD)
2237 DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD)
2238 DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD)
2239 DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)
2240
2241 #undef DO_ASRD
2242 #undef DO_ZPZI
2243 #undef DO_ZPZI_D
2244
2245 #define DO_SHRNB(NAME, TYPEW, TYPEN, OP) \
2246 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
2247 { \
2248 intptr_t i, opr_sz = simd_oprsz(desc); \
2249 int shift = simd_data(desc); \
2250 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
2251 TYPEW nn = *(TYPEW *)(vn + i); \
2252 *(TYPEW *)(vd + i) = (TYPEN)OP(nn, shift); \
2253 } \
2254 }
2255
2256 #define DO_SHRNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
2257 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
2258 { \
2259 intptr_t i, opr_sz = simd_oprsz(desc); \
2260 int shift = simd_data(desc); \
2261 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
2262 TYPEW nn = *(TYPEW *)(vn + HW(i)); \
2263 *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, shift); \
2264 } \
2265 }
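/*
 * Note the difference between the two expanders above: the "bottom"
 * form stores the narrowed value zero-extended into the whole wide
 * element (clearing the top half), while the "top" form writes only
 * the upper narrow half and leaves the bottom half of the destination
 * unchanged.
 */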
2266
2267 DO_SHRNB(sve2_shrnb_h, uint16_t, uint8_t, DO_SHR)
2268 DO_SHRNB(sve2_shrnb_s, uint32_t, uint16_t, DO_SHR)
2269 DO_SHRNB(sve2_shrnb_d, uint64_t, uint32_t, DO_SHR)
2270
2271 DO_SHRNT(sve2_shrnt_h, uint16_t, uint8_t, H1_2, H1, DO_SHR)
2272 DO_SHRNT(sve2_shrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_SHR)
2273 DO_SHRNT(sve2_shrnt_d, uint64_t, uint32_t, , H1_4, DO_SHR)
2274
2275 DO_SHRNB(sve2_rshrnb_h, uint16_t, uint8_t, do_urshr)
2276 DO_SHRNB(sve2_rshrnb_s, uint32_t, uint16_t, do_urshr)
2277 DO_SHRNB(sve2_rshrnb_d, uint64_t, uint32_t, do_urshr)
2278
2279 DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr)
2280 DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr)
2281 DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, , H1_4, do_urshr)
2282
2283 #define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX)
2284 #define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX)
2285 #define DO_SQSHRUN_D(x, sh) \
2286 do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX)
2287
2288 DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H)
2289 DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S)
2290 DO_SHRNB(sve2_sqshrunb_d, int64_t, uint32_t, DO_SQSHRUN_D)
2291
2292 DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H)
2293 DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S)
2294 DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, , H1_4, DO_SQSHRUN_D)
2295
2296 #define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX)
2297 #define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX)
2298 #define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX)
2299
2300 DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H)
2301 DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S)
2302 DO_SHRNB(sve2_sqrshrunb_d, int64_t, uint32_t, DO_SQRSHRUN_D)
2303
2304 DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
2305 DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
2306 DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)
2307
2308 #define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX)
2309 #define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX)
2310 #define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX)
2311
2312 DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H)
2313 DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S)
2314 DO_SHRNB(sve2_sqshrnb_d, int64_t, uint32_t, DO_SQSHRN_D)
2315
2316 DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H)
2317 DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S)
2318 DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, , H1_4, DO_SQSHRN_D)
2319
2320 #define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX)
2321 #define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX)
2322 #define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX)
2323
2324 DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H)
2325 DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S)
2326 DO_SHRNB(sve2_sqrshrnb_d, int64_t, uint32_t, DO_SQRSHRN_D)
2327
2328 DO_SHRNT(sve2_sqrshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRN_H)
2329 DO_SHRNT(sve2_sqrshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRN_S)
2330 DO_SHRNT(sve2_sqrshrnt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRN_D)
2331
2332 #define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
2333 #define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
2334 #define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
2335
2336 DO_SHRNB(sve2_uqshrnb_h, uint16_t, uint8_t, DO_UQSHRN_H)
2337 DO_SHRNB(sve2_uqshrnb_s, uint32_t, uint16_t, DO_UQSHRN_S)
2338 DO_SHRNB(sve2_uqshrnb_d, uint64_t, uint32_t, DO_UQSHRN_D)
2339
2340 DO_SHRNT(sve2_uqshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQSHRN_H)
2341 DO_SHRNT(sve2_uqshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQSHRN_S)
2342 DO_SHRNT(sve2_uqshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQSHRN_D)
2343
2344 #define DO_UQRSHRN_H(x, sh) MIN(do_urshr(x, sh), UINT8_MAX)
2345 #define DO_UQRSHRN_S(x, sh) MIN(do_urshr(x, sh), UINT16_MAX)
2346 #define DO_UQRSHRN_D(x, sh) MIN(do_urshr(x, sh), UINT32_MAX)
2347
2348 DO_SHRNB(sve2_uqrshrnb_h, uint16_t, uint8_t, DO_UQRSHRN_H)
2349 DO_SHRNB(sve2_uqrshrnb_s, uint32_t, uint16_t, DO_UQRSHRN_S)
2350 DO_SHRNB(sve2_uqrshrnb_d, uint64_t, uint32_t, DO_UQRSHRN_D)
2351
2352 DO_SHRNT(sve2_uqrshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQRSHRN_H)
2353 DO_SHRNT(sve2_uqrshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQRSHRN_S)
2354 DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQRSHRN_D)
2355
2356 #undef DO_SHRNB
2357 #undef DO_SHRNT
2358
2359 #define DO_BINOPNB(NAME, TYPEW, TYPEN, SHIFT, OP) \
2360 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
2361 { \
2362 intptr_t i, opr_sz = simd_oprsz(desc); \
2363 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
2364 TYPEW nn = *(TYPEW *)(vn + i); \
2365 TYPEW mm = *(TYPEW *)(vm + i); \
2366 *(TYPEW *)(vd + i) = (TYPEN)OP(nn, mm, SHIFT); \
2367 } \
2368 }
2369
2370 #define DO_BINOPNT(NAME, TYPEW, TYPEN, SHIFT, HW, HN, OP) \
2371 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
2372 { \
2373 intptr_t i, opr_sz = simd_oprsz(desc); \
2374 for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
2375 TYPEW nn = *(TYPEW *)(vn + HW(i)); \
2376 TYPEW mm = *(TYPEW *)(vm + HW(i)); \
2377 *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, mm, SHIFT); \
2378 } \
2379 }
2380
2381 #define DO_ADDHN(N, M, SH) ((N + M) >> SH)
2382 #define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH)
2383 #define DO_SUBHN(N, M, SH) ((N - M) >> SH)
2384 #define DO_RSUBHN(N, M, SH) ((N - M + ((__typeof(N))1 << (SH - 1))) >> SH)
2385
2386 DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN)
2387 DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN)
2388 DO_BINOPNB(sve2_addhnb_d, uint64_t, uint32_t, 32, DO_ADDHN)
2389
2390 DO_BINOPNT(sve2_addhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_ADDHN)
2391 DO_BINOPNT(sve2_addhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_ADDHN)
2392 DO_BINOPNT(sve2_addhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_ADDHN)
2393
2394 DO_BINOPNB(sve2_raddhnb_h, uint16_t, uint8_t, 8, DO_RADDHN)
2395 DO_BINOPNB(sve2_raddhnb_s, uint32_t, uint16_t, 16, DO_RADDHN)
2396 DO_BINOPNB(sve2_raddhnb_d, uint64_t, uint32_t, 32, DO_RADDHN)
2397
2398 DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN)
2399 DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN)
2400 DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RADDHN)
2401
2402 DO_BINOPNB(sve2_subhnb_h, uint16_t, uint8_t, 8, DO_SUBHN)
2403 DO_BINOPNB(sve2_subhnb_s, uint32_t, uint16_t, 16, DO_SUBHN)
2404 DO_BINOPNB(sve2_subhnb_d, uint64_t, uint32_t, 32, DO_SUBHN)
2405
2406 DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN)
2407 DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN)
2408 DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_SUBHN)
2409
2410 DO_BINOPNB(sve2_rsubhnb_h, uint16_t, uint8_t, 8, DO_RSUBHN)
2411 DO_BINOPNB(sve2_rsubhnb_s, uint32_t, uint16_t, 16, DO_RSUBHN)
2412 DO_BINOPNB(sve2_rsubhnb_d, uint64_t, uint32_t, 32, DO_RSUBHN)
2413
2414 DO_BINOPNT(sve2_rsubhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RSUBHN)
2415 DO_BINOPNT(sve2_rsubhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RSUBHN)
2416 DO_BINOPNT(sve2_rsubhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RSUBHN)
2417
2418 #undef DO_RSUBHN
2419 #undef DO_SUBHN
2420 #undef DO_RADDHN
2421 #undef DO_ADDHN
2422
2423 #undef DO_BINOPNB
2424
2425 /* Fully general four-operand expander, controlled by a predicate.
2426 */
2427 #define DO_ZPZZZ(NAME, TYPE, H, OP) \
2428 void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
2429 void *vg, uint32_t desc) \
2430 { \
2431 intptr_t i, opr_sz = simd_oprsz(desc); \
2432 for (i = 0; i < opr_sz; ) { \
2433 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
2434 do { \
2435 if (pg & 1) { \
2436 TYPE nn = *(TYPE *)(vn + H(i)); \
2437 TYPE mm = *(TYPE *)(vm + H(i)); \
2438 TYPE aa = *(TYPE *)(va + H(i)); \
2439 *(TYPE *)(vd + H(i)) = OP(aa, nn, mm); \
2440 } \
2441 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
2442 } while (i & 15); \
2443 } \
2444 }
2445
2446 /* Similarly, specialized for 64-bit operands. */
2447 #define DO_ZPZZZ_D(NAME, TYPE, OP) \
2448 void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
2449 void *vg, uint32_t desc) \
2450 { \
2451 intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
2452 TYPE *d = vd, *a = va, *n = vn, *m = vm; \
2453 uint8_t *pg = vg; \
2454 for (i = 0; i < opr_sz; i += 1) { \
2455 if (pg[H1(i)] & 1) { \
2456 TYPE aa = a[i], nn = n[i], mm = m[i]; \
2457 d[i] = OP(aa, nn, mm); \
2458 } \
2459 } \
2460 }
2461
2462 #define DO_MLA(A, N, M) (A + N * M)
2463 #define DO_MLS(A, N, M) (A - N * M)
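/*
 * For example, sve_mla_b below computes d = a + n * m for each byte
 * whose predicate bit is set; elements whose predicate bit is clear
 * keep their previous contents in the destination.
 */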
2464
2465 DO_ZPZZZ(sve_mla_b, uint8_t, H1, DO_MLA)
2466 DO_ZPZZZ(sve_mls_b, uint8_t, H1, DO_MLS)
2467
2468 DO_ZPZZZ(sve_mla_h, uint16_t, H1_2, DO_MLA)
2469 DO_ZPZZZ(sve_mls_h, uint16_t, H1_2, DO_MLS)
2470
2471 DO_ZPZZZ(sve_mla_s, uint32_t, H1_4, DO_MLA)
2472 DO_ZPZZZ(sve_mls_s, uint32_t, H1_4, DO_MLS)
2473
2474 DO_ZPZZZ_D(sve_mla_d, uint64_t, DO_MLA)
2475 DO_ZPZZZ_D(sve_mls_d, uint64_t, DO_MLS)
2476
2477 #undef DO_MLA
2478 #undef DO_MLS
2479 #undef DO_ZPZZZ
2480 #undef DO_ZPZZZ_D
2481
2482 void HELPER(sve_index_b)(void *vd, uint32_t start,
2483 uint32_t incr, uint32_t desc)
2484 {
2485 intptr_t i, opr_sz = simd_oprsz(desc);
2486 uint8_t *d = vd;
2487 for (i = 0; i < opr_sz; i += 1) {
2488 d[H1(i)] = start + i * incr;
2489 }
2490 }
2491
2492 void HELPER(sve_index_h)(void *vd, uint32_t start,
2493 uint32_t incr, uint32_t desc)
2494 {
2495 intptr_t i, opr_sz = simd_oprsz(desc) / 2;
2496 uint16_t *d = vd;
2497 for (i = 0; i < opr_sz; i += 1) {
2498 d[H2(i)] = start + i * incr;
2499 }
2500 }
2501
2502 void HELPER(sve_index_s)(void *vd, uint32_t start,
2503 uint32_t incr, uint32_t desc)
2504 {
2505 intptr_t i, opr_sz = simd_oprsz(desc) / 4;
2506 uint32_t *d = vd;
2507 for (i = 0; i < opr_sz; i += 1) {
2508 d[H4(i)] = start + i * incr;
2509 }
2510 }
2511
2512 void HELPER(sve_index_d)(void *vd, uint64_t start,
2513 uint64_t incr, uint32_t desc)
2514 {
2515 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2516 uint64_t *d = vd;
2517 for (i = 0; i < opr_sz; i += 1) {
2518 d[i] = start + i * incr;
2519 }
2520 }
2521
2522 void HELPER(sve_adr_p32)(void *vd, void *vn, void *vm, uint32_t desc)
2523 {
2524 intptr_t i, opr_sz = simd_oprsz(desc) / 4;
2525 uint32_t sh = simd_data(desc);
2526 uint32_t *d = vd, *n = vn, *m = vm;
2527 for (i = 0; i < opr_sz; i += 1) {
2528 d[i] = n[i] + (m[i] << sh);
2529 }
2530 }
2531
2532 void HELPER(sve_adr_p64)(void *vd, void *vn, void *vm, uint32_t desc)
2533 {
2534 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2535 uint64_t sh = simd_data(desc);
2536 uint64_t *d = vd, *n = vn, *m = vm;
2537 for (i = 0; i < opr_sz; i += 1) {
2538 d[i] = n[i] + (m[i] << sh);
2539 }
2540 }
2541
2542 void HELPER(sve_adr_s32)(void *vd, void *vn, void *vm, uint32_t desc)
2543 {
2544 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2545 uint64_t sh = simd_data(desc);
2546 uint64_t *d = vd, *n = vn, *m = vm;
2547 for (i = 0; i < opr_sz; i += 1) {
2548 d[i] = n[i] + ((uint64_t)(int32_t)m[i] << sh);
2549 }
2550 }
2551
2552 void HELPER(sve_adr_u32)(void *vd, void *vn, void *vm, uint32_t desc)
2553 {
2554 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2555 uint64_t sh = simd_data(desc);
2556 uint64_t *d = vd, *n = vn, *m = vm;
2557 for (i = 0; i < opr_sz; i += 1) {
2558 d[i] = n[i] + ((uint64_t)(uint32_t)m[i] << sh);
2559 }
2560 }
2561
2562 void HELPER(sve_fexpa_h)(void *vd, void *vn, uint32_t desc)
2563 {
2564 /* These constants are cut and pasted directly from the ARM pseudocode. */
2565 static const uint16_t coeff[] = {
2566 0x0000, 0x0016, 0x002d, 0x0045, 0x005d, 0x0075, 0x008e, 0x00a8,
2567 0x00c2, 0x00dc, 0x00f8, 0x0114, 0x0130, 0x014d, 0x016b, 0x0189,
2568 0x01a8, 0x01c8, 0x01e8, 0x0209, 0x022b, 0x024e, 0x0271, 0x0295,
2569 0x02ba, 0x02e0, 0x0306, 0x032e, 0x0356, 0x037f, 0x03a9, 0x03d4,
2570 };
2571 intptr_t i, opr_sz = simd_oprsz(desc) / 2;
2572 uint16_t *d = vd, *n = vn;
2573
2574 for (i = 0; i < opr_sz; i++) {
2575 uint16_t nn = n[i];
2576 intptr_t idx = extract32(nn, 0, 5);
2577 uint16_t exp = extract32(nn, 5, 5);
2578 d[i] = coeff[idx] | (exp << 10);
2579 }
2580 }
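/*
 * The result above is assembled directly in float16 format: bits 5..9
 * of the input supply the exponent field (shifted into place by
 * << 10) and the table supplies the low-order fraction bits.
 */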
2581
2582 void HELPER(sve_fexpa_s)(void *vd, void *vn, uint32_t desc)
2583 {
2584 /* These constants are cut and pasted directly from the ARM pseudocode. */
2585 static const uint32_t coeff[] = {
2586 0x000000, 0x0164d2, 0x02cd87, 0x043a29,
2587 0x05aac3, 0x071f62, 0x08980f, 0x0a14d5,
2588 0x0b95c2, 0x0d1adf, 0x0ea43a, 0x1031dc,
2589 0x11c3d3, 0x135a2b, 0x14f4f0, 0x16942d,
2590 0x1837f0, 0x19e046, 0x1b8d3a, 0x1d3eda,
2591 0x1ef532, 0x20b051, 0x227043, 0x243516,
2592 0x25fed7, 0x27cd94, 0x29a15b, 0x2b7a3a,
2593 0x2d583f, 0x2f3b79, 0x3123f6, 0x3311c4,
2594 0x3504f3, 0x36fd92, 0x38fbaf, 0x3aff5b,
2595 0x3d08a4, 0x3f179a, 0x412c4d, 0x4346cd,
2596 0x45672a, 0x478d75, 0x49b9be, 0x4bec15,
2597 0x4e248c, 0x506334, 0x52a81e, 0x54f35b,
2598 0x5744fd, 0x599d16, 0x5bfbb8, 0x5e60f5,
2599 0x60ccdf, 0x633f89, 0x65b907, 0x68396a,
2600 0x6ac0c7, 0x6d4f30, 0x6fe4ba, 0x728177,
2601 0x75257d, 0x77d0df, 0x7a83b3, 0x7d3e0c,
2602 };
2603 intptr_t i, opr_sz = simd_oprsz(desc) / 4;
2604 uint32_t *d = vd, *n = vn;
2605
2606 for (i = 0; i < opr_sz; i++) {
2607 uint32_t nn = n[i];
2608 intptr_t idx = extract32(nn, 0, 6);
2609 uint32_t exp = extract32(nn, 6, 8);
2610 d[i] = coeff[idx] | (exp << 23);
2611 }
2612 }
2613
2614 void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc)
2615 {
2616 /* These constants are cut and pasted directly from the ARM pseudocode. */
2617 static const uint64_t coeff[] = {
2618 0x0000000000000ull, 0x02C9A3E778061ull, 0x059B0D3158574ull,
2619 0x0874518759BC8ull, 0x0B5586CF9890Full, 0x0E3EC32D3D1A2ull,
2620 0x11301D0125B51ull, 0x1429AAEA92DE0ull, 0x172B83C7D517Bull,
2621 0x1A35BEB6FCB75ull, 0x1D4873168B9AAull, 0x2063B88628CD6ull,
2622 0x2387A6E756238ull, 0x26B4565E27CDDull, 0x29E9DF51FDEE1ull,
2623 0x2D285A6E4030Bull, 0x306FE0A31B715ull, 0x33C08B26416FFull,
2624 0x371A7373AA9CBull, 0x3A7DB34E59FF7ull, 0x3DEA64C123422ull,
2625 0x4160A21F72E2Aull, 0x44E086061892Dull, 0x486A2B5C13CD0ull,
2626 0x4BFDAD5362A27ull, 0x4F9B2769D2CA7ull, 0x5342B569D4F82ull,
2627 0x56F4736B527DAull, 0x5AB07DD485429ull, 0x5E76F15AD2148ull,
2628 0x6247EB03A5585ull, 0x6623882552225ull, 0x6A09E667F3BCDull,
2629 0x6DFB23C651A2Full, 0x71F75E8EC5F74ull, 0x75FEB564267C9ull,
2630 0x7A11473EB0187ull, 0x7E2F336CF4E62ull, 0x82589994CCE13ull,
2631 0x868D99B4492EDull, 0x8ACE5422AA0DBull, 0x8F1AE99157736ull,
2632 0x93737B0CDC5E5ull, 0x97D829FDE4E50ull, 0x9C49182A3F090ull,
2633 0xA0C667B5DE565ull, 0xA5503B23E255Dull, 0xA9E6B5579FDBFull,
2634 0xAE89F995AD3ADull, 0xB33A2B84F15FBull, 0xB7F76F2FB5E47ull,
2635 0xBCC1E904BC1D2ull, 0xC199BDD85529Cull, 0xC67F12E57D14Bull,
2636 0xCB720DCEF9069ull, 0xD072D4A07897Cull, 0xD5818DCFBA487ull,
2637 0xDA9E603DB3285ull, 0xDFC97337B9B5Full, 0xE502EE78B3FF6ull,
2638 0xEA4AFA2A490DAull, 0xEFA1BEE615A27ull, 0xF50765B6E4540ull,
2639 0xFA7C1819E90D8ull,
2640 };
2641 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2642 uint64_t *d = vd, *n = vn;
2643
2644 for (i = 0; i < opr_sz; i++) {
2645 uint64_t nn = n[i];
2646 intptr_t idx = extract32(nn, 0, 6);
2647 uint64_t exp = extract32(nn, 6, 11);
2648 d[i] = coeff[idx] | (exp << 52);
2649 }
2650 }
2651
2652 void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc)
2653 {
2654 intptr_t i, opr_sz = simd_oprsz(desc) / 2;
2655 uint16_t *d = vd, *n = vn, *m = vm;
2656 for (i = 0; i < opr_sz; i += 1) {
2657 uint16_t nn = n[i];
2658 uint16_t mm = m[i];
2659 if (mm & 1) {
2660 nn = float16_one;
2661 }
2662 d[i] = nn ^ (mm & 2) << 14;
2663 }
2664 }
2665
2666 void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc)
2667 {
2668 intptr_t i, opr_sz = simd_oprsz(desc) / 4;
2669 uint32_t *d = vd, *n = vn, *m = vm;
2670 for (i = 0; i < opr_sz; i += 1) {
2671 uint32_t nn = n[i];
2672 uint32_t mm = m[i];
2673 if (mm & 1) {
2674 nn = float32_one;
2675 }
2676 d[i] = nn ^ (mm & 2) << 30;
2677 }
2678 }
2679
2680 void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc)
2681 {
2682 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2683 uint64_t *d = vd, *n = vn, *m = vm;
2684 for (i = 0; i < opr_sz; i += 1) {
2685 uint64_t nn = n[i];
2686 uint64_t mm = m[i];
2687 if (mm & 1) {
2688 nn = float64_one;
2689 }
2690 d[i] = nn ^ (mm & 2) << 62;
2691 }
2692 }
2693
2694 /*
2695 * Signed saturating addition with scalar operand.
2696 */
2697
2698 void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
2699 {
2700 intptr_t i, oprsz = simd_oprsz(desc);
2701
2702 for (i = 0; i < oprsz; i += sizeof(int8_t)) {
2703 *(int8_t *)(d + i) = DO_SQADD_B(b, *(int8_t *)(a + i));
2704 }
2705 }
2706
2707 void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
2708 {
2709 intptr_t i, oprsz = simd_oprsz(desc);
2710
2711 for (i = 0; i < oprsz; i += sizeof(int16_t)) {
2712 *(int16_t *)(d + i) = DO_SQADD_H(b, *(int16_t *)(a + i));
2713 }
2714 }
2715
2716 void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
2717 {
2718 intptr_t i, oprsz = simd_oprsz(desc);
2719
2720 for (i = 0; i < oprsz; i += sizeof(int32_t)) {
2721 *(int32_t *)(d + i) = DO_SQADD_S(b, *(int32_t *)(a + i));
2722 }
2723 }
2724
2725 void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc)
2726 {
2727 intptr_t i, oprsz = simd_oprsz(desc);
2728
2729 for (i = 0; i < oprsz; i += sizeof(int64_t)) {
2730 *(int64_t *)(d + i) = do_sqadd_d(b, *(int64_t *)(a + i));
2731 }
2732 }
2733
2734 /*
2735 * Unsigned saturating addition with scalar operand.
2736 */
2737
2738 void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
2739 {
2740 intptr_t i, oprsz = simd_oprsz(desc);
2741
2742 for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
2743 *(uint8_t *)(d + i) = DO_UQADD_B(b, *(uint8_t *)(a + i));
2744 }
2745 }
2746
2747 void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
2748 {
2749 intptr_t i, oprsz = simd_oprsz(desc);
2750
2751 for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
2752 *(uint16_t *)(d + i) = DO_UQADD_H(b, *(uint16_t *)(a + i));
2753 }
2754 }
2755
2756 void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
2757 {
2758 intptr_t i, oprsz = simd_oprsz(desc);
2759
2760 for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
2761 *(uint32_t *)(d + i) = DO_UQADD_S(b, *(uint32_t *)(a + i));
2762 }
2763 }
2764
2765 void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc)
2766 {
2767 intptr_t i, oprsz = simd_oprsz(desc);
2768
2769 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
2770 *(uint64_t *)(d + i) = do_uqadd_d(b, *(uint64_t *)(a + i));
2771 }
2772 }
2773
2774 void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc)
2775 {
2776 intptr_t i, oprsz = simd_oprsz(desc);
2777
2778 for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
2779 *(uint64_t *)(d + i) = do_uqsub_d(*(uint64_t *)(a + i), b);
2780 }
2781 }
2782
2783 /* Two-operand predicated copy immediate with merge. All valid immediates
2784 * can fit within 17 signed bits in the simd_data field.
2785 */
2786 void HELPER(sve_cpy_m_b)(void *vd, void *vn, void *vg,
2787 uint64_t mm, uint32_t desc)
2788 {
2789 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2790 uint64_t *d = vd, *n = vn;
2791 uint8_t *pg = vg;
2792
2793 mm = dup_const(MO_8, mm);
2794 for (i = 0; i < opr_sz; i += 1) {
2795 uint64_t nn = n[i];
2796 uint64_t pp = expand_pred_b(pg[H1(i)]);
2797 d[i] = (mm & pp) | (nn & ~pp);
2798 }
2799 }
2800
2801 void HELPER(sve_cpy_m_h)(void *vd, void *vn, void *vg,
2802 uint64_t mm, uint32_t desc)
2803 {
2804 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2805 uint64_t *d = vd, *n = vn;
2806 uint8_t *pg = vg;
2807
2808 mm = dup_const(MO_16, mm);
2809 for (i = 0; i < opr_sz; i += 1) {
2810 uint64_t nn = n[i];
2811 uint64_t pp = expand_pred_h(pg[H1(i)]);
2812 d[i] = (mm & pp) | (nn & ~pp);
2813 }
2814 }
2815
2816 void HELPER(sve_cpy_m_s)(void *vd, void *vn, void *vg,
2817 uint64_t mm, uint32_t desc)
2818 {
2819 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2820 uint64_t *d = vd, *n = vn;
2821 uint8_t *pg = vg;
2822
2823 mm = dup_const(MO_32, mm);
2824 for (i = 0; i < opr_sz; i += 1) {
2825 uint64_t nn = n[i];
2826 uint64_t pp = expand_pred_s(pg[H1(i)]);
2827 d[i] = (mm & pp) | (nn & ~pp);
2828 }
2829 }
2830
2831 void HELPER(sve_cpy_m_d)(void *vd, void *vn, void *vg,
2832 uint64_t mm, uint32_t desc)
2833 {
2834 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2835 uint64_t *d = vd, *n = vn;
2836 uint8_t *pg = vg;
2837
2838 for (i = 0; i < opr_sz; i += 1) {
2839 uint64_t nn = n[i];
2840 d[i] = (pg[H1(i)] & 1 ? mm : nn);
2841 }
2842 }
2843
2844 void HELPER(sve_cpy_z_b)(void *vd, void *vg, uint64_t val, uint32_t desc)
2845 {
2846 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2847 uint64_t *d = vd;
2848 uint8_t *pg = vg;
2849
2850 val = dup_const(MO_8, val);
2851 for (i = 0; i < opr_sz; i += 1) {
2852 d[i] = val & expand_pred_b(pg[H1(i)]);
2853 }
2854 }
2855
2856 void HELPER(sve_cpy_z_h)(void *vd, void *vg, uint64_t val, uint32_t desc)
2857 {
2858 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2859 uint64_t *d = vd;
2860 uint8_t *pg = vg;
2861
2862 val = dup_const(MO_16, val);
2863 for (i = 0; i < opr_sz; i += 1) {
2864 d[i] = val & expand_pred_h(pg[H1(i)]);
2865 }
2866 }
2867
2868 void HELPER(sve_cpy_z_s)(void *vd, void *vg, uint64_t val, uint32_t desc)
2869 {
2870 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2871 uint64_t *d = vd;
2872 uint8_t *pg = vg;
2873
2874 val = dup_const(MO_32, val);
2875 for (i = 0; i < opr_sz; i += 1) {
2876 d[i] = val & expand_pred_s(pg[H1(i)]);
2877 }
2878 }
2879
2880 void HELPER(sve_cpy_z_d)(void *vd, void *vg, uint64_t val, uint32_t desc)
2881 {
2882 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
2883 uint64_t *d = vd;
2884 uint8_t *pg = vg;
2885
2886 for (i = 0; i < opr_sz; i += 1) {
2887 d[i] = (pg[H1(i)] & 1 ? val : 0);
2888 }
2889 }
2890
2891 /* Big-endian hosts need to frob the byte indices. If the copy
2892 * happens to be 8-byte aligned, then no frobbing is necessary.
2893 */
2894 static void swap_memmove(void *vd, void *vs, size_t n)
2895 {
2896 uintptr_t d = (uintptr_t)vd;
2897 uintptr_t s = (uintptr_t)vs;
2898 uintptr_t o = (d | s | n) & 7;
2899 size_t i;
2900
2901 #ifndef HOST_WORDS_BIGENDIAN
2902 o = 0;
2903 #endif
2904 switch (o) {
2905 case 0:
2906 memmove(vd, vs, n);
2907 break;
2908
2909 case 4:
2910 if (d < s || d >= s + n) {
2911 for (i = 0; i < n; i += 4) {
2912 *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i);
2913 }
2914 } else {
2915 for (i = n; i > 0; ) {
2916 i -= 4;
2917 *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i);
2918 }
2919 }
2920 break;
2921
2922 case 2:
2923 case 6:
2924 if (d < s || d >= s + n) {
2925 for (i = 0; i < n; i += 2) {
2926 *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i);
2927 }
2928 } else {
2929 for (i = n; i > 0; ) {
2930 i -= 2;
2931 *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i);
2932 }
2933 }
2934 break;
2935
2936 default:
2937 if (d < s || d >= s + n) {
2938 for (i = 0; i < n; i++) {
2939 *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i);
2940 }
2941 } else {
2942 for (i = n; i > 0; ) {
2943 i -= 1;
2944 *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i);
2945 }
2946 }
2947 break;
2948 }
2949 }
2950
2951 /* Similarly for memset of 0. */
2952 static void swap_memzero(void *vd, size_t n)
2953 {
2954 uintptr_t d = (uintptr_t)vd;
2955 uintptr_t o = (d | n) & 7;
2956 size_t i;
2957
2958 /* Usually, the first bit of a predicate is set, so N is 0. */
2959 if (likely(n == 0)) {
2960 return;
2961 }
2962
2963 #ifndef HOST_WORDS_BIGENDIAN
2964 o = 0;
2965 #endif
2966 switch (o) {
2967 case 0:
2968 memset(vd, 0, n);
2969 break;
2970
2971 case 4:
2972 for (i = 0; i < n; i += 4) {
2973 *(uint32_t *)H1_4(d + i) = 0;
2974 }
2975 break;
2976
2977 case 2:
2978 case 6:
2979 for (i = 0; i < n; i += 2) {
2980 *(uint16_t *)H1_2(d + i) = 0;
2981 }
2982 break;
2983
2984 default:
2985 for (i = 0; i < n; i++) {
2986 *(uint8_t *)H1(d + i) = 0;
2987 }
2988 break;
2989 }
2990 }
2991
2992 void HELPER(sve_ext)(void *vd, void *vn, void *vm, uint32_t desc)
2993 {
2994 intptr_t opr_sz = simd_oprsz(desc);
2995 size_t n_ofs = simd_data(desc);
2996 size_t n_siz = opr_sz - n_ofs;
2997
2998 if (vd != vm) {
2999 swap_memmove(vd, vn + n_ofs, n_siz);
3000 swap_memmove(vd + n_siz, vm, n_ofs);
3001 } else if (vd != vn) {
3002 swap_memmove(vd + n_siz, vd, n_ofs);
3003 swap_memmove(vd, vn + n_ofs, n_siz);
3004 } else {
3005 /* vd == vn == vm. Need temp space. */
3006 ARMVectorReg tmp;
3007 swap_memmove(&tmp, vm, n_ofs);
3008 swap_memmove(vd, vd + n_ofs, n_siz);
3009 memcpy(vd + n_siz, &tmp, n_ofs);
3010 }
3011 }
3012
3013 #define DO_INSR(NAME, TYPE, H) \
3014 void HELPER(NAME)(void *vd, void *vn, uint64_t val, uint32_t desc) \
3015 { \
3016 intptr_t opr_sz = simd_oprsz(desc); \
3017 swap_memmove(vd + sizeof(TYPE), vn, opr_sz - sizeof(TYPE)); \
3018 *(TYPE *)(vd + H(0)) = val; \
3019 }
3020
3021 DO_INSR(sve_insr_b, uint8_t, H1)
3022 DO_INSR(sve_insr_h, uint16_t, H1_2)
3023 DO_INSR(sve_insr_s, uint32_t, H1_4)
3024 DO_INSR(sve_insr_d, uint64_t, )
3025
3026 #undef DO_INSR
3027
3028 void HELPER(sve_rev_b)(void *vd, void *vn, uint32_t desc)
3029 {
3030 intptr_t i, j, opr_sz = simd_oprsz(desc);
3031 for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
3032 uint64_t f = *(uint64_t *)(vn + i);
3033 uint64_t b = *(uint64_t *)(vn + j);
3034 *(uint64_t *)(vd + i) = bswap64(b);
3035 *(uint64_t *)(vd + j) = bswap64(f);
3036 }
3037 }
3038
3039 void HELPER(sve_rev_h)(void *vd, void *vn, uint32_t desc)
3040 {
3041 intptr_t i, j, opr_sz = simd_oprsz(desc);
3042 for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
3043 uint64_t f = *(uint64_t *)(vn + i);
3044 uint64_t b = *(uint64_t *)(vn + j);
3045 *(uint64_t *)(vd + i) = hswap64(b);
3046 *(uint64_t *)(vd + j) = hswap64(f);
3047 }
3048 }
3049
3050 void HELPER(sve_rev_s)(void *vd, void *vn, uint32_t desc)
3051 {
3052 intptr_t i, j, opr_sz = simd_oprsz(desc);
3053 for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
3054 uint64_t f = *(uint64_t *)(vn + i);
3055 uint64_t b = *(uint64_t *)(vn + j);
3056 *(uint64_t *)(vd + i) = rol64(b, 32);
3057 *(uint64_t *)(vd + j) = rol64(f, 32);
3058 }
3059 }
3060
3061 void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
3062 {
3063 intptr_t i, j, opr_sz = simd_oprsz(desc);
3064 for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
3065 uint64_t f = *(uint64_t *)(vn + i);
3066 uint64_t b = *(uint64_t *)(vn + j);
3067 *(uint64_t *)(vd + i) = b;
3068 *(uint64_t *)(vd + j) = f;
3069 }
3070 }
3071
3072 typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool);
3073
3074 static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc,
3075 bool is_tbx, tb_impl_fn *fn)
3076 {
3077 ARMVectorReg scratch;
3078 uintptr_t oprsz = simd_oprsz(desc);
3079
3080 if (unlikely(vd == vn)) {
3081 vn = memcpy(&scratch, vn, oprsz);
3082 }
3083
3084 fn(vd, vn, NULL, vm, oprsz, is_tbx);
3085 }
3086
3087 static inline void do_tbl2(void *vd, void *vn0, void *vn1, void *vm,
3088 uint32_t desc, bool is_tbx, tb_impl_fn *fn)
3089 {
3090 ARMVectorReg scratch;
3091 uintptr_t oprsz = simd_oprsz(desc);
3092
3093 if (unlikely(vd == vn0)) {
3094 vn0 = memcpy(&scratch, vn0, oprsz);
3095 if (vd == vn1) {
3096 vn1 = vn0;
3097 }
3098 } else if (unlikely(vd == vn1)) {
3099 vn1 = memcpy(&scratch, vn1, oprsz);
3100 }
3101
3102 fn(vd, vn0, vn1, vm, oprsz, is_tbx);
3103 }
3104
3105 #define DO_TB(SUFF, TYPE, H) \
3106 static inline void do_tb_##SUFF(void *vd, void *vt0, void *vt1, \
3107 void *vm, uintptr_t oprsz, bool is_tbx) \
3108 { \
3109 TYPE *d = vd, *tbl0 = vt0, *tbl1 = vt1, *indexes = vm; \
3110 uintptr_t i, nelem = oprsz / sizeof(TYPE); \
3111 for (i = 0; i < nelem; ++i) { \
3112 TYPE index = indexes[H1(i)], val = 0; \
3113 if (index < nelem) { \
3114 val = tbl0[H(index)]; \
3115 } else { \
3116 index -= nelem; \
3117 if (tbl1 && index < nelem) { \
3118 val = tbl1[H(index)]; \
3119 } else if (is_tbx) { \
3120 continue; \
3121 } \
3122 } \
3123 d[H(i)] = val; \
3124 } \
3125 } \
3126 void HELPER(sve_tbl_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
3127 { \
3128 do_tbl1(vd, vn, vm, desc, false, do_tb_##SUFF); \
3129 } \
3130 void HELPER(sve2_tbl_##SUFF)(void *vd, void *vn0, void *vn1, \
3131 void *vm, uint32_t desc) \
3132 { \
3133 do_tbl2(vd, vn0, vn1, vm, desc, false, do_tb_##SUFF); \
3134 } \
3135 void HELPER(sve2_tbx_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
3136 { \
3137 do_tbl1(vd, vn, vm, desc, true, do_tb_##SUFF); \
3138 }
3139
3140 DO_TB(b, uint8_t, H1)
3141 DO_TB(h, uint16_t, H2)
3142 DO_TB(s, uint32_t, H4)
3143 DO_TB(d, uint64_t, )
3144
3145 #undef DO_TB
3146
3147 #define DO_UNPK(NAME, TYPED, TYPES, HD, HS) \
3148 void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
3149 { \
3150 intptr_t i, opr_sz = simd_oprsz(desc); \
3151 TYPED *d = vd; \
3152 TYPES *n = vn; \
3153 ARMVectorReg tmp; \
3154 if (unlikely(vn - vd < opr_sz)) { \
3155 n = memcpy(&tmp, n, opr_sz / 2); \
3156 } \
3157 for (i = 0; i < opr_sz / sizeof(TYPED); i++) { \
3158 d[HD(i)] = n[HS(i)]; \
3159 } \
3160 }
3161
3162 DO_UNPK(sve_sunpk_h, int16_t, int8_t, H2, H1)
3163 DO_UNPK(sve_sunpk_s, int32_t, int16_t, H4, H2)
3164 DO_UNPK(sve_sunpk_d, int64_t, int32_t, , H4)
3165
3166 DO_UNPK(sve_uunpk_h, uint16_t, uint8_t, H2, H1)
3167 DO_UNPK(sve_uunpk_s, uint32_t, uint16_t, H4, H2)
3168 DO_UNPK(sve_uunpk_d, uint64_t, uint32_t, , H4)
3169
3170 #undef DO_UNPK
3171
3172 /* Mask of bits included in the even numbered predicates of width esz.
3173 * We also use this for expand_bits/compress_bits, and so extend the
3174 * same pattern out to 16-bit units.
3175 */
3176 static const uint64_t even_bit_esz_masks[5] = {
3177 0x5555555555555555ull,
3178 0x3333333333333333ull,
3179 0x0f0f0f0f0f0f0f0full,
3180 0x00ff00ff00ff00ffull,
3181 0x0000ffff0000ffffull,
3182 };
3183
3184 /* Zero-extend units of 2**N bits to units of 2**(N+1) bits.
3185 * For N==0, this corresponds to the operation that in qemu/bitops.h
3186 * we call half_shuffle64; this algorithm is from Hacker's Delight,
3187 * section 7-2 Shuffling Bits.
3188 */
3189 static uint64_t expand_bits(uint64_t x, int n)
3190 {
3191 int i;
3192
3193 x &= 0xffffffffu;
3194 for (i = 4; i >= n; i--) {
3195 int sh = 1 << i;
3196 x = ((x << sh) | x) & even_bit_esz_masks[i];
3197 }
3198 return x;
3199 }
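/*
 * Worked example: expand_bits(0b1011, 0) == 0b01000101; each source
 * bit i moves to bit 2*i with a zero interleaved above it.
 */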
3200
3201 /* Compress units of 2**(N+1) bits to units of 2**N bits.
3202 * For N==0, this corresponds to the operation that in qemu/bitops.h
3203 * we call half_unshuffle64; this algorithm is from Hacker's Delight,
3204 * section 7-2 Shuffling Bits, where it is called an inverse half shuffle.
3205 */
3206 static uint64_t compress_bits(uint64_t x, int n)
3207 {
3208 int i;
3209
3210 for (i = n; i <= 4; i++) {
3211 int sh = 1 << i;
3212 x &= even_bit_esz_masks[i];
3213 x = (x >> sh) | x;
3214 }
3215 return x & 0xffffffffu;
3216 }
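/*
 * compress_bits() inverts the transform above for the bits that survive
 * the masking, e.g.
 *     compress_bits(0x45, 0)   == 0xb
 *     compress_bits(0x0a0b, 2) == 0xab.
 */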
3217
3218 void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
3219 {
3220 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
3221 int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
3222 intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
3223 int esize = 1 << esz;
3224 uint64_t *d = vd;
3225 intptr_t i;
3226
3227 if (oprsz <= 8) {
3228 uint64_t nn = *(uint64_t *)vn;
3229 uint64_t mm = *(uint64_t *)vm;
3230 int half = 4 * oprsz;
3231
3232 nn = extract64(nn, high * half, half);
3233 mm = extract64(mm, high * half, half);
3234 nn = expand_bits(nn, esz);
3235 mm = expand_bits(mm, esz);
3236 d[0] = nn | (mm << esize);
3237 } else {
3238 ARMPredicateReg tmp;
3239
3240 /* We produce output faster than we consume input.
3241 Therefore we must be mindful of possible overlap. */
3242 if (vd == vn) {
3243 vn = memcpy(&tmp, vn, oprsz);
3244 if (vd == vm) {
3245 vm = vn;
3246 }
3247 } else if (vd == vm) {
3248 vm = memcpy(&tmp, vm, oprsz);
3249 }
3250 if (high) {
3251 high = oprsz >> 1;
3252 }
3253
3254 if ((oprsz & 7) == 0) {
3255 uint32_t *n = vn, *m = vm;
3256 high >>= 2;
3257
3258 for (i = 0; i < oprsz / 8; i++) {
3259 uint64_t nn = n[H4(high + i)];
3260 uint64_t mm = m[H4(high + i)];
3261
3262 nn = expand_bits(nn, esz);
3263 mm = expand_bits(mm, esz);
3264 d[i] = nn | (mm << esize);
3265 }
3266 } else {
3267 uint8_t *n = vn, *m = vm;
3268 uint16_t *d16 = vd;
3269
3270 for (i = 0; i < oprsz / 2; i++) {
3271 uint16_t nn = n[H1(high + i)];
3272 uint16_t mm = m[H1(high + i)];
3273
3274 nn = expand_bits(nn, esz);
3275 mm = expand_bits(mm, esz);
3276 d16[H2(i)] = nn | (mm << esize);
3277 }
3278 }
3279 }
3280 }
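/*
 * Worked example for sve_zip_p with esz == 0 (byte elements): the bits
 * of VN land in the even result bits and the bits of VM in the odd
 * result bits.  For nn == 0b0011 and mm == 0b0101,
 *     expand_bits(nn, 0) == 0b00101
 *     expand_bits(mm, 0) == 0b10001
 *     d[0] == 0b00101 | (0b10001 << 1) == 0b00100111,
 * i.e. the element-wise interleave n0 m0 n1 m1 n2 m2 n3 m3 from bit 0 up.
 */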
3281
3282 void HELPER(sve_uzp_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
3283 {
3284 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
3285 int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
3286 int odd = FIELD_EX32(pred_desc, PREDDESC, DATA) << esz;
3287 uint64_t *d = vd, *n = vn, *m = vm;
3288 uint64_t l, h;
3289 intptr_t i;
3290
3291 if (oprsz <= 8) {
3292 l = compress_bits(n[0] >> odd, esz);
3293 h = compress_bits(m[0] >> odd, esz);
3294 d[0] = l | (h << (4 * oprsz));
3295 } else {
3296 ARMPredicateReg tmp_m;
3297 intptr_t oprsz_16 = oprsz / 16;
3298
3299 if ((vm - vd) < (uintptr_t)oprsz) {
3300 m = memcpy(&tmp_m, vm, oprsz);
3301 }
3302
3303 for (i = 0; i < oprsz_16; i++) {
3304 l = n[2 * i + 0];
3305 h = n[2 * i + 1];
3306 l = compress_bits(l >> odd, esz);
3307 h = compress_bits(h >> odd, esz);
3308 d[i] = l | (h << 32);
3309 }
3310
3311 /*
3312 * For VL which is not a multiple of 512, the results from M do not
3313 * align nicely with the uint64_t for D. Put the aligned results
3314 * from M into TMP_M and then copy it into place afterward.
3315 */
3316 if (oprsz & 15) {
3317 int final_shift = (oprsz & 15) * 2;
3318
3319 l = n[2 * i + 0];
3320 h = n[2 * i + 1];
3321 l = compress_bits(l >> odd, esz);
3322 h = compress_bits(h >> odd, esz);
3323 d[i] = l | (h << final_shift);
3324
3325 for (i = 0; i < oprsz_16; i++) {
3326 l = m[2 * i + 0];
3327 h = m[2 * i + 1];
3328 l = compress_bits(l >> odd, esz);
3329 h = compress_bits(h >> odd, esz);
3330 tmp_m.p[i] = l | (h << 32);
3331 }
3332 l = m[2 * i + 0];
3333 h = m[2 * i + 1];
3334 l = compress_bits(l >> odd, esz);
3335 h = compress_bits(h >> odd, esz);
3336 tmp_m.p[i] = l | (h << final_shift);
3337
3338 swap_memmove(vd + oprsz / 2, &tmp_m, oprsz / 2);
3339 } else {
3340 for (i = 0; i < oprsz_16; i++) {
3341 l = m[2 * i + 0];
3342 h = m[2 * i + 1];
3343 l = compress_bits(l >> odd, esz);
3344 h = compress_bits(h >> odd, esz);
3345 d[oprsz_16 + i] = l | (h << 32);
3346 }
3347 }
3348 }
3349 }
3350
3351 void HELPER(sve_trn_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
3352 {
3353 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
3354 int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
3355 int odd = FIELD_EX32(pred_desc, PREDDESC, DATA);
3356 uint64_t *d = vd, *n = vn, *m = vm;
3357 uint64_t mask;
3358 int shr, shl;
3359 intptr_t i;
3360
3361 shl = 1 << esz;
3362 shr = 0;
3363 mask = even_bit_esz_masks[esz];
3364 if (odd) {
3365 mask <<= shl;
3366 shr = shl;
3367 shl = 0;
3368 }
3369
3370 for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) {
3371 uint64_t nn = (n[i] & mask) >> shr;
3372 uint64_t mm = (m[i] & mask) << shl;
3373 d[i] = nn + mm;
3374 }
3375 }
3376
3377 /* Reverse units of 2**N bits. */
3378 static uint64_t reverse_bits_64(uint64_t x, int n)
3379 {
3380 int i, sh;
3381
3382 x = bswap64(x);
3383 for (i = 2, sh = 4; i >= n; i--, sh >>= 1) {
3384 uint64_t mask = even_bit_esz_masks[i];
3385 x = ((x & mask) << sh) | ((x >> sh) & mask);
3386 }
3387 return x;
3388 }
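/*
 * For N == 3 the loop body never runs, so reverse_bits_64(x, 3) is just
 * bswap64(x), i.e. reversal of byte-sized units.  For N == 0 every bit
 * is reversed, e.g. reverse_bits_64(1, 0) == 0x8000000000000000ull.
 */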
3389
3390 static uint8_t reverse_bits_8(uint8_t x, int n)
3391 {
3392 static const uint8_t mask[3] = { 0x55, 0x33, 0x0f };
3393 int i, sh;
3394
3395 for (i = 2, sh = 4; i >= n; i--, sh >>= 1) {
3396 x = ((x & mask[i]) << sh) | ((x >> sh) & mask[i]);
3397 }
3398 return x;
3399 }
3400
3401 void HELPER(sve_rev_p)(void *vd, void *vn, uint32_t pred_desc)
3402 {
3403 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
3404 int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
3405 intptr_t i, oprsz_2 = oprsz / 2;
3406
3407 if (oprsz <= 8) {
3408 uint64_t l = *(uint64_t *)vn;
3409 l = reverse_bits_64(l << (64 - 8 * oprsz), esz);
3410 *(uint64_t *)vd = l;
3411 } else if ((oprsz & 15) == 0) {
3412 for (i = 0; i < oprsz_2; i += 8) {
3413 intptr_t ih = oprsz - 8 - i;
3414 uint64_t l = reverse_bits_64(*(uint64_t *)(vn + i), esz);
3415 uint64_t h = reverse_bits_64(*(uint64_t *)(vn + ih), esz);
3416 *(uint64_t *)(vd + i) = h;
3417 *(uint64_t *)(vd + ih) = l;
3418 }
3419 } else {
3420 for (i = 0; i < oprsz_2; i += 1) {
3421 intptr_t il = H1(i);
3422 intptr_t ih = H1(oprsz - 1 - i);
3423 uint8_t l = reverse_bits_8(*(uint8_t *)(vn + il), esz);
3424 uint8_t h = reverse_bits_8(*(uint8_t *)(vn + ih), esz);
3425 *(uint8_t *)(vd + il) = h;
3426 *(uint8_t *)(vd + ih) = l;
3427 }
3428 }
3429 }
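/*
 * In the oprsz <= 8 case above, the predicate occupies the low
 * 8 * oprsz bits of the word.  Shifting it to the top of the 64-bit
 * word before reversing means the reversed element order lands back in
 * the low 8 * oprsz bits of the result.
 */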
3430
3431 void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc)
3432 {
3433 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
3434 intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
3435 uint64_t *d = vd;
3436 intptr_t i;
3437
3438 if (oprsz <= 8) {
3439 uint64_t nn = *(uint64_t *)vn;
3440 int half = 4 * oprsz;
3441
3442 nn = extract64(nn, high * half, half);
3443 nn = expand_bits(nn, 0);
3444 d[0] = nn;
3445 } else {
3446 ARMPredicateReg tmp_n;
3447
3448 /* We produce output faster than we consume input.
3449 Therefore we must be mindful of possible overlap. */
3450 if ((vn - vd) < (uintptr_t)oprsz) {
3451 vn = memcpy(&tmp_n, vn, oprsz);
3452 }
3453 if (high) {
3454 high = oprsz >> 1;
3455 }
3456
3457 if ((oprsz & 7) == 0) {
3458 uint32_t *n = vn;
3459 high >>= 2;
3460
3461 for (i = 0; i < oprsz / 8; i++) {
3462 uint64_t nn = n[H4(high + i)];
3463 d[i] = expand_bits(nn, 0);
3464 }
3465 } else {
3466 uint16_t *d16 = vd;
3467 uint8_t *n = vn;
3468
3469 for (i = 0; i < oprsz / 2; i++) {
3470 uint16_t nn = n[H1(high + i)];
3471 d16[H2(i)] = expand_bits(nn, 0);
3472 }
3473 }
3474 }
3475 }
3476
3477 #define DO_ZIP(NAME, TYPE, H) \
3478 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
3479 { \
3480 intptr_t oprsz = simd_oprsz(desc); \
3481 intptr_t i, oprsz_2 = oprsz / 2; \
3482 ARMVectorReg tmp_n, tmp_m; \
3483 /* We produce output faster than we consume input. \
3484 Therefore we must be mindful of possible overlap. */ \
3485 if (unlikely((vn - vd) < (uintptr_t)oprsz)) { \
3486 vn = memcpy(&tmp_n, vn, oprsz_2); \
3487 } \
3488 if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \
3489 vm = memcpy(&tmp_m, vm, oprsz_2); \
3490 } \
3491 for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
3492 *(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i)); \
3493 *(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)(vm + H(i)); \
3494 } \
3495 }
3496
3497 DO_ZIP(sve_zip_b, uint8_t, H1)
3498 DO_ZIP(sve_zip_h, uint16_t, H1_2)
3499 DO_ZIP(sve_zip_s, uint32_t, H1_4)
3500 DO_ZIP(sve_zip_d, uint64_t, )
3501
3502 #define DO_UZP(NAME, TYPE, H) \
3503 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
3504 { \
3505 intptr_t oprsz = simd_oprsz(desc); \
3506 intptr_t oprsz_2 = oprsz / 2; \
3507 intptr_t odd_ofs = simd_data(desc); \
3508 intptr_t i; \
3509 ARMVectorReg tmp_m; \
3510 if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \
3511 vm = memcpy(&tmp_m, vm, oprsz); \
3512 } \
3513 for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
3514 *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(2 * i + odd_ofs)); \
3515 } \
3516 for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
3517 *(TYPE *)(vd + H(oprsz_2 + i)) = *(TYPE *)(vm + H(2 * i + odd_ofs)); \
3518 } \
3519 }
3520
3521 DO_UZP(sve_uzp_b, uint8_t, H1)
3522 DO_UZP(sve_uzp_h, uint16_t, H1_2)
3523 DO_UZP(sve_uzp_s, uint32_t, H1_4)
3524 DO_UZP(sve_uzp_d, uint64_t, )
3525
3526 #define DO_TRN(NAME, TYPE, H) \
3527 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
3528 { \
3529 intptr_t oprsz = simd_oprsz(desc); \
3530 intptr_t odd_ofs = simd_data(desc); \
3531 intptr_t i; \
3532 for (i = 0; i < oprsz; i += 2 * sizeof(TYPE)) { \
3533 TYPE ae = *(TYPE *)(vn + H(i + odd_ofs)); \
3534 TYPE be = *(TYPE *)(vm + H(i + odd_ofs)); \
3535 *(TYPE *)(vd + H(i + 0)) = ae; \
3536 *(TYPE *)(vd + H(i + sizeof(TYPE))) = be; \
3537 } \
3538 }
3539
3540 DO_TRN(sve_trn_b, uint8_t, H1)
3541 DO_TRN(sve_trn_h, uint16_t, H1_2)
3542 DO_TRN(sve_trn_s, uint32_t, H1_4)
3543 DO_TRN(sve_trn_d, uint64_t, )
3544
3545 #undef DO_ZIP
3546 #undef DO_UZP
3547 #undef DO_TRN
3548
3549 void HELPER(sve_compact_s)(void *vd, void *vn, void *vg, uint32_t desc)
3550 {
3551 intptr_t i, j, opr_sz = simd_oprsz(desc) / 4;
3552 uint32_t *d = vd, *n = vn;
3553 uint8_t *pg = vg;
3554
3555 for (i = j = 0; i < opr_sz; i++) {
3556 if (pg[H1(i / 2)] & (i & 1 ? 0x10 : 0x01)) {
3557 d[H4(j)] = n[H4(i)];
3558 j++;
3559 }
3560 }
3561 for (; j < opr_sz; j++) {
3562 d[H4(j)] = 0;
3563 }
3564 }
3565
3566 void HELPER(sve_compact_d)(void *vd, void *vn, void *vg, uint32_t desc)
3567 {
3568 intptr_t i, j, opr_sz = simd_oprsz(desc) / 8;
3569 uint64_t *d = vd, *n = vn;
3570 uint8_t *pg = vg;
3571
3572 for (i = j = 0; i < opr_sz; i++) {
3573 if (pg[H1(i)] & 1) {
3574 d[j] = n[i];
3575 j++;
3576 }
3577 }
3578 for (; j < opr_sz; j++) {
3579 d[j] = 0;
3580 }
3581 }
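/*
 * Worked example for the COMPACT helpers above: active elements are
 * gathered, in order, to the low indices and the remainder of the
 * vector is zeroed.  With 32-bit elements {10, 11, 12, 13} and a
 * predicate selecting elements 1 and 3, the result is {11, 13, 0, 0}.
 */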
3582
3583 /* Similar to the ARM LastActiveElement pseudocode function, except the
3584 * result is multiplied by the element size. This includes the not found
3585 * indication; e.g. not found for esz=3 is -8.
3586 */
3587 int32_t HELPER(sve_last_active_element)(void *vg, uint32_t pred_desc)
3588 {
3589 intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
3590 intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
3591
3592 return last_active_element(vg, words, esz);
3593 }
3594
3595 void HELPER(sve_splice)(void *vd, void *vn, void *vm, void *vg, uint32_t desc)
3596 {
3597 intptr_t opr_sz = simd_oprsz(desc) / 8;
3598 int esz = simd_data(desc);
3599 uint64_t pg, first_g, last_g, len, mask = pred_esz_masks[esz];
3600 intptr_t i, first_i, last_i;
3601 ARMVectorReg tmp;
3602
3603 first_i = last_i = 0;
3604 first_g = last_g = 0;
3605
3606 /* Find the extent of the active elements within VG. */
3607 for (i = QEMU_ALIGN_UP(opr_sz, 8) - 8; i >= 0; i -= 8) {
3608 pg = *(uint64_t *)(vg + i) & mask;
3609 if (pg) {
3610 if (last_g == 0) {
3611 last_g = pg;
3612 last_i = i;
3613 }
3614 first_g = pg;
3615 first_i = i;
3616 }
3617 }
3618
3619 len = 0;
3620 if (first_g != 0) {
3621 first_i = first_i * 8 + ctz64(first_g);
3622 last_i = last_i * 8 + 63 - clz64(last_g);
3623 len = last_i - first_i + (1 << esz);
3624 if (vd == vm) {
3625 vm = memcpy(&tmp, vm, opr_sz * 8);
3626 }
3627 swap_memmove(vd, vn + first_i, len);
3628 }
3629 swap_memmove(vd + len, vm, opr_sz * 8 - len);
3630 }
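/*
 * SPLICE copies the slice of VN from the first through the last active
 * element (inclusive) to the bottom of VD, then fills the remainder of
 * VD from the start of VM.  E.g. with byte elements and the predicate
 * active over elements 3..6, VD becomes n[3..6] followed by the
 * leading bytes of VM.
 */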
3631
3632 void HELPER(sve_sel_zpzz_b)(void *vd, void *vn, void *vm,
3633 void *vg, uint32_t desc)
3634 {
3635 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
3636 uint64_t *d = vd, *n = vn, *m = vm;
3637 uint8_t *pg = vg;
3638
3639 for (i = 0; i < opr_sz; i += 1) {
3640 uint64_t nn = n[i], mm = m[i];
3641 uint64_t pp = expand_pred_b(pg[H1(i)]);
3642 d[i] = (nn & pp) | (mm & ~pp);
3643 }
3644 }
3645
3646 void HELPER(sve_sel_zpzz_h)(void *vd, void *vn, void *vm,
3647 void *vg, uint32_t desc)
3648 {
3649 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
3650 uint64_t *d = vd, *n = vn, *m = vm;
3651 uint8_t *pg = vg;
3652
3653 for (i = 0; i < opr_sz; i += 1) {
3654 uint64_t nn = n[i], mm = m[i];
3655 uint64_t pp = expand_pred_h(pg[H1(i)]);
3656 d[i] = (nn & pp) | (mm & ~pp);
3657 }
3658 }
3659
3660 void HELPER(sve_sel_zpzz_s)(void *vd, void *vn, void *vm,
3661 void *vg, uint32_t desc)
3662 {
3663 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
3664 uint64_t *d = vd, *n = vn, *m = vm;
3665 uint8_t *pg = vg;
3666
3667 for (i = 0; i < opr_sz; i += 1) {
3668 uint64_t nn = n[i], mm = m[i];
3669 uint64_t pp = expand_pred_s(pg[H1(i)]);
3670 d[i] = (nn & pp) | (mm & ~pp);
3671 }
3672 }
3673
3674 void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
3675 void *vg, uint32_t desc)
3676 {
3677 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
3678 uint64_t *d = vd, *n = vn, *m = vm;
3679 uint8_t *pg = vg;
3680
3681 for (i = 0; i < opr_sz; i += 1) {
3682 uint64_t nn = n[i], mm = m[i];
3683 d[i] = (pg[H1(i)] & 1 ? nn : mm);
3684 }
3685 }
3686
3687 /* Two operand comparison controlled by a predicate.
3688 * ??? It is very tempting to want to be able to expand this inline
3689 * with x86 instructions, e.g.
3690 *
3691 * vcmpeqw zm, zn, %ymm0
3692 * vpmovmskb %ymm0, %eax
3693 * and $0x5555, %eax
3694 * and pg, %eax
3695 *
3696 * or even aarch64, e.g.
3697 *
3698 * // mask = 4000 1000 0400 0100 0040 0010 0004 0001
3699 * cmeq v0.8h, zn, zm
3700 * and v0.8h, v0.8h, mask
3701 * addv h0, v0.8h
3702 * and v0.8b, pg
3703 *
3704 * However, coming up with an abstraction that allows vector inputs and
3705 * a scalar output, and also handles the byte-ordering of sub-uint64_t
3706 * scalar outputs, is tricky.
3707 */
3708 #define DO_CMP_PPZZ(NAME, TYPE, OP, H, MASK) \
3709 uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
3710 { \
3711 intptr_t opr_sz = simd_oprsz(desc); \
3712 uint32_t flags = PREDTEST_INIT; \
3713 intptr_t i = opr_sz; \
3714 do { \
3715 uint64_t out = 0, pg; \
3716 do { \
3717 i -= sizeof(TYPE), out <<= sizeof(TYPE); \
3718 TYPE nn = *(TYPE *)(vn + H(i)); \
3719 TYPE mm = *(TYPE *)(vm + H(i)); \
3720 out |= nn OP mm; \
3721 } while (i & 63); \
3722 pg = *(uint64_t *)(vg + (i >> 3)) & MASK; \
3723 out &= pg; \
3724 *(uint64_t *)(vd + (i >> 3)) = out; \
3725 flags = iter_predtest_bwd(out, pg, flags); \
3726 } while (i > 0); \
3727 return flags; \
3728 }
3729
3730 #define DO_CMP_PPZZ_B(NAME, TYPE, OP) \
3731 DO_CMP_PPZZ(NAME, TYPE, OP, H1, 0xffffffffffffffffull)
3732 #define DO_CMP_PPZZ_H(NAME, TYPE, OP) \
3733 DO_CMP_PPZZ(NAME, TYPE, OP, H1_2, 0x5555555555555555ull)
3734 #define DO_CMP_PPZZ_S(NAME, TYPE, OP) \
3735 DO_CMP_PPZZ(NAME, TYPE, OP, H1_4, 0x1111111111111111ull)
3736 #define DO_CMP_PPZZ_D(NAME, TYPE, OP) \
3737 DO_CMP_PPZZ(NAME, TYPE, OP, , 0x0101010101010101ull)
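/*
 * Each iteration of the inner loop shifts OUT up by sizeof(TYPE) bits
 * and deposits one comparison result in the least significant of them,
 * so a predicate element occupies sizeof(TYPE) bits with only its low
 * bit meaningful.  MASK therefore keeps one bit per element when the
 * guard predicate is applied: every bit for bytes, every 2nd bit for
 * halfwords, every 4th for words, every 8th for doublewords.
 */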
3738
3739 DO_CMP_PPZZ_B(sve_cmpeq_ppzz_b, uint8_t, ==)
3740 DO_CMP_PPZZ_H(sve_cmpeq_ppzz_h, uint16_t, ==)
3741 DO_CMP_PPZZ_S(sve_cmpeq_ppzz_s, uint32_t, ==)
3742 DO_CMP_PPZZ_D(sve_cmpeq_ppzz_d, uint64_t, ==)
3743
3744 DO_CMP_PPZZ_B(sve_cmpne_ppzz_b, uint8_t, !=)
3745 DO_CMP_PPZZ_H(sve_cmpne_ppzz_h, uint16_t, !=)
3746 DO_CMP_PPZZ_S(sve_cmpne_ppzz_s, uint32_t, !=)
3747 DO_CMP_PPZZ_D(sve_cmpne_ppzz_d, uint64_t, !=)
3748
3749 DO_CMP_PPZZ_B(sve_cmpgt_ppzz_b, int8_t, >)
3750 DO_CMP_PPZZ_H(sve_cmpgt_ppzz_h, int16_t, >)
3751 DO_CMP_PPZZ_S(sve_cmpgt_ppzz_s, int32_t, >)
3752 DO_CMP_PPZZ_D(sve_cmpgt_ppzz_d, int64_t, >)
3753
3754 DO_CMP_PPZZ_B(sve_cmpge_ppzz_b, int8_t, >=)
3755 DO_CMP_PPZZ_H(sve_cmpge_ppzz_h, int16_t, >=)
3756 DO_CMP_PPZZ_S(sve_cmpge_ppzz_s, int32_t, >=)
3757 DO_CMP_PPZZ_D(sve_cmpge_ppzz_d, int64_t, >=)
3758
3759 DO_CMP_PPZZ_B(sve_cmphi_ppzz_b, uint8_t, >)
3760 DO_CMP_PPZZ_H(sve_cmphi_ppzz_h, uint16_t, >)
3761 DO_CMP_PPZZ_S(sve_cmphi_ppzz_s, uint32_t, >)
3762 DO_CMP_PPZZ_D(sve_cmphi_ppzz_d, uint64_t, >)
3763
3764 DO_CMP_PPZZ_B(sve_cmphs_ppzz_b, uint8_t, >=)
3765 DO_CMP_PPZZ_H(sve_cmphs_ppzz_h, uint16_t, >=)
3766 DO_CMP_PPZZ_S(sve_cmphs_ppzz_s, uint32_t, >=)
3767 DO_CMP_PPZZ_D(sve_cmphs_ppzz_d, uint64_t, >=)
3768
3769 #undef DO_CMP_PPZZ_B
3770 #undef DO_CMP_PPZZ_H
3771 #undef DO_CMP_PPZZ_S
3772 #undef DO_CMP_PPZZ_D
3773 #undef DO_CMP_PPZZ
3774
3775 /* Similar, but the second source is "wide". */
3776 #define DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H, MASK) \
3777 uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
3778 { \
3779 intptr_t opr_sz = simd_oprsz(desc); \
3780 uint32_t flags = PREDTEST_INIT; \
3781 intptr_t i = opr_sz; \
3782 do { \
3783 uint64_t out = 0, pg; \
3784 do { \
3785 TYPEW mm = *(TYPEW *)(vm + i - 8); \
3786 do { \
3787 i -= sizeof(TYPE), out <<= sizeof(TYPE); \
3788 TYPE nn = *(TYPE *)(vn + H(i)); \
3789 out |= nn OP mm; \
3790 } while (i & 7); \
3791 } while (i & 63); \
3792 pg = *(uint64_t *)(vg + (i >> 3)) & MASK; \
3793 out &= pg; \
3794 *(uint64_t *)(vd + (i >> 3)) = out; \
3795 flags = iter_predtest_bwd(out, pg, flags); \
3796 } while (i > 0); \
3797 return flags; \
3798 }
3799
3800 #define DO_CMP_PPZW_B(NAME, TYPE, TYPEW, OP) \
3801 DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1, 0xffffffffffffffffull)
3802 #define DO_CMP_PPZW_H(NAME, TYPE, TYPEW, OP) \
3803 DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_2, 0x5555555555555555ull)
3804 #define DO_CMP_PPZW_S(NAME, TYPE, TYPEW, OP) \
3805 DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_4, 0x1111111111111111ull)
3806
3807 DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, int8_t, uint64_t, ==)
3808 DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, int16_t, uint64_t, ==)
3809 DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, int32_t, uint64_t, ==)
3810
3811 DO_CMP_PPZW_B(sve_cmpne_ppzw_b, int8_t, uint64_t, !=)
3812 DO_CMP_PPZW_H(sve_cmpne_ppzw_h, int16_t, uint64_t, !=)
3813 DO_CMP_PPZW_S(sve_cmpne_ppzw_s, int32_t, uint64_t, !=)
3814
3815 DO_CMP_PPZW_B(sve_cmpgt_ppzw_b, int8_t, int64_t, >)
3816 DO_CMP_PPZW_H(sve_cmpgt_ppzw_h, int16_t, int64_t, >)
3817 DO_CMP_PPZW_S(sve_cmpgt_ppzw_s, int32_t, int64_t, >)
3818
3819 DO_CMP_PPZW_B(sve_cmpge_ppzw_b, int8_t, int64_t, >=)
3820 DO_CMP_PPZW_H(sve_cmpge_ppzw_h, int16_t, int64_t, >=)
3821 DO_CMP_PPZW_S(sve_cmpge_ppzw_s, int32_t, int64_t, >=)
3822
3823 DO_CMP_PPZW_B(sve_cmphi_ppzw_b, uint8_t, uint64_t, >)
3824 DO_CMP_PPZW_H(sve_cmphi_ppzw_h, uint16_t, uint64_t, >)
3825 DO_CMP_PPZW_S(sve_cmphi_ppzw_s, uint32_t, uint64_t, >)
3826
3827 DO_CMP_PPZW_B(sve_cmphs_ppzw_b, uint8_t, uint64_t, >=)
3828 DO_CMP_PPZW_H(sve_cmphs_ppzw_h, uint16_t, uint64_t, >=)
3829 DO_CMP_PPZW_S(sve_cmphs_ppzw_s, uint32_t, uint64_t, >=)
3830
3831 DO_CMP_PPZW_B(sve_cmplt_ppzw_b, int8_t, int64_t, <)
3832 DO_CMP_PPZW_H(sve_cmplt_ppzw_h, int16_t, int64_t, <)
3833 DO_CMP_PPZW_S(sve_cmplt_ppzw_s, int32_t, int64_t, <)
3834
3835 DO_CMP_PPZW_B(sve_cmple_ppzw_b, int8_t, int64_t, <=)
3836 DO_CMP_PPZW_H(sve_cmple_ppzw_h, int16_t, int64_t, <=)
3837 DO_CMP_PPZW_S(sve_cmple_ppzw_s, int32_t, int64_t, <=)
3838
3839 DO_CMP_PPZW_B(sve_cmplo_ppzw_b, uint8_t, uint64_t, <)
3840 DO_CMP_PPZW_H(sve_cmplo_ppzw_h, uint16_t, uint64_t, <)
3841 DO_CMP_PPZW_S(sve_cmplo_ppzw_s, uint32_t, uint64_t, <)
3842
3843 DO_CMP_PPZW_B(sve_cmpls_ppzw_b, uint8_t, uint64_t, <=)
3844 DO_CMP_PPZW_H(sve_cmpls_ppzw_h, uint16_t, uint64_t, <=)
3845 DO_CMP_PPZW_S(sve_cmpls_ppzw_s, uint32_t, uint64_t, <=)
3846
3847 #undef DO_CMP_PPZW_B
3848 #undef DO_CMP_PPZW_H
3849 #undef DO_CMP_PPZW_S
3850 #undef DO_CMP_PPZW
3851
3852 /* Similar, but the second source is immediate. */
3853 #define DO_CMP_PPZI(NAME, TYPE, OP, H, MASK) \
3854 uint32_t HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
3855 { \
3856 intptr_t opr_sz = simd_oprsz(desc); \
3857 uint32_t flags = PREDTEST_INIT; \
3858 TYPE mm = simd_data(desc); \
3859 intptr_t i = opr_sz; \
3860 do { \
3861 uint64_t out = 0, pg; \
3862 do { \
3863 i -= sizeof(TYPE), out <<= sizeof(TYPE); \
3864 TYPE nn = *(TYPE *)(vn + H(i)); \
3865 out |= nn OP mm; \
3866 } while (i & 63); \
3867 pg = *(uint64_t *)(vg + (i >> 3)) & MASK; \
3868 out &= pg; \
3869 *(uint64_t *)(vd + (i >> 3)) = out; \
3870 flags = iter_predtest_bwd(out, pg, flags); \
3871 } while (i > 0); \
3872 return flags; \
3873 }
3874
3875 #define DO_CMP_PPZI_B(NAME, TYPE, OP) \
3876 DO_CMP_PPZI(NAME, TYPE, OP, H1, 0xffffffffffffffffull)
3877 #define DO_CMP_PPZI_H(NAME, TYPE, OP) \
3878 DO_CMP_PPZI(NAME, TYPE, OP, H1_2, 0x5555555555555555ull)
3879 #define DO_CMP_PPZI_S(NAME, TYPE, OP) \
3880 DO_CMP_PPZI(NAME, TYPE, OP, H1_4, 0x1111111111111111ull)
3881 #define DO_CMP_PPZI_D(NAME, TYPE, OP) \
3882 DO_CMP_PPZI(NAME, TYPE, OP, , 0x0101010101010101ull)
3883
3884 DO_CMP_PPZI_B(sve_cmpeq_ppzi_b, uint8_t, ==)
3885 DO_CMP_PPZI_H(sve_cmpeq_ppzi_h, uint16_t, ==)
3886 DO_CMP_PPZI_S(sve_cmpeq_ppzi_s, uint32_t, ==)
3887 DO_CMP_PPZI_D(sve_cmpeq_ppzi_d, uint64_t, ==)
3888
3889 DO_CMP_PPZI_B(sve_cmpne_ppzi_b, uint8_t, !=)
3890 DO_CMP_PPZI_H(sve_cmpne_ppzi_h, uint16_t, !=)
3891 DO_CMP_PPZI_S(sve_cmpne_ppzi_s, uint32_t, !=)
3892 DO_CMP_PPZI_D(sve_cmpne_ppzi_d, uint64_t, !=)
3893
3894 DO_CMP_PPZI_B(sve_cmpgt_ppzi_b, int8_t, >)
3895 DO_CMP_PPZI_H(sve_cmpgt_ppzi_h, int16_t, >)
3896 DO_CMP_PPZI_S(sve_cmpgt_ppzi_s, int32_t, >)
3897 DO_CMP_PPZI_D(sve_cmpgt_ppzi_d, int64_t, >)
3898
3899 DO_CMP_PPZI_B(sve_cmpge_ppzi_b, int8_t, >=)
3900 DO_CMP_PPZI_H(sve_cmpge_ppzi_h, int16_t, >=)
3901 DO_CMP_PPZI_S(sve_cmpge_ppzi_s, int32_t, >=)
3902 DO_CMP_PPZI_D(sve_cmpge_ppzi_d, int64_t, >=)
3903
3904 DO_CMP_PPZI_B(sve_cmphi_ppzi_b, uint8_t, >)
3905 DO_CMP_PPZI_H(sve_cmphi_ppzi_h, uint16_t, >)
3906 DO_CMP_PPZI_S(sve_cmphi_ppzi_s, uint32_t, >)
3907 DO_CMP_PPZI_D(sve_cmphi_ppzi_d, uint64_t, >)
3908
3909 DO_CMP_PPZI_B(sve_cmphs_ppzi_b, uint8_t, >=)
3910 DO_CMP_PPZI_H(sve_cmphs_ppzi_h, uint16_t, >=)
3911 DO_CMP_PPZI_S(sve_cmphs_ppzi_s, uint32_t, >=)
3912 DO_CMP_PPZI_D(sve_cmphs_ppzi_d, uint64_t, >=)
3913
3914 DO_CMP_PPZI_B(sve_cmplt_ppzi_b, int8_t, <)
3915 DO_CMP_PPZI_H(sve_cmplt_ppzi_h, int16_t, <)
3916 DO_CMP_PPZI_S(sve_cmplt_ppzi_s, int32_t, <)
3917 DO_CMP_PPZI_D(sve_cmplt_ppzi_d, int64_t, <)
3918
3919 DO_CMP_PPZI_B(sve_cmple_ppzi_b, int8_t, <=)
3920 DO_CMP_PPZI_H(sve_cmple_ppzi_h, int16_t, <=)
3921 DO_CMP_PPZI_S(sve_cmple_ppzi_s, int32_t, <=)
3922 DO_CMP_PPZI_D(sve_cmple_ppzi_d, int64_t, <=)
3923
3924 DO_CMP_PPZI_B(sve_cmplo_ppzi_b, uint8_t, <)
3925 DO_CMP_PPZI_H(sve_cmplo_ppzi_h, uint16_t, <)
3926 DO_CMP_PPZI_S(sve_cmplo_ppzi_s, uint32_t, <)
3927 DO_CMP_PPZI_D(sve_cmplo_ppzi_d, uint64_t, <)
3928
3929 DO_CMP_PPZI_B(sve_cmpls_ppzi_b, uint8_t, <=)
3930 DO_CMP_PPZI_H(sve_cmpls_ppzi_h, uint16_t, <=)
3931 DO_CMP_PPZI_S(sve_cmpls_ppzi_s, uint32_t, <=)
3932 DO_CMP_PPZI_D(sve_cmpls_ppzi_d, uint64_t, <=)
3933
3934 #undef DO_CMP_PPZI_B
3935 #undef DO_CMP_PPZI_H
3936 #undef DO_CMP_PPZI_S
3937 #undef DO_CMP_PPZI_D
3938 #undef DO_CMP_PPZI
3939
3940 /* Similar to the ARM LastActive pseudocode function. */
3941 static bool last_active_pred(void *vd, void *vg, intptr_t oprsz)
3942 {
3943 intptr_t i;
3944
3945 for (i = QEMU_ALIGN_UP(oprsz, 8) - 8; i >= 0; i -= 8) {
3946 uint64_t pg = *(uint64_t *)(vg + i);
3947 if (pg) {
3948 return (pow2floor(pg) & *(uint64_t *)(vd + i)) != 0;
3949 }
3950 }
3951 return 0;
3952 }
3953
3954 /* Compute a mask into RETB that is true for all G, up to and including
3955 * (if after) or excluding (if !after) the first G & N.
3956 * Return true if BRK found.
3957 */
3958 static bool compute_brk(uint64_t *retb, uint64_t n, uint64_t g,
3959 bool brk, bool after)
3960 {
3961 uint64_t b;
3962
3963 if (brk) {
3964 b = 0;
3965 } else if ((g & n) == 0) {
3966 /* For all G, no N are set; break not found. */
3967 b = g;
3968 } else {
3969 /* Break somewhere in N. Locate it. */
3970 b = g & n; /* guard true, pred true */
3971 b = b & -b; /* first such */
3972 if (after) {
3973 b = b | (b - 1); /* break after same */
3974 } else {
3975 b = b - 1; /* break before same */
3976 }
3977 brk = true;
3978 }
3979
3980 *retb = b;
3981 return brk;
3982 }
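/*
 * Worked example for compute_brk(): with g == 0xff and n == 0x10 the
 * first active true element is bit 4, so b == 0x10 after isolating it;
 * "break after" returns b | (b - 1) == 0x1f (elements 0-4 remain set),
 * while "break before" returns b - 1 == 0x0f (elements 0-3).
 */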
3983
3984 /* Compute a zeroing BRK. */
3985 static void compute_brk_z(uint64_t *d, uint64_t *n, uint64_t *g,
3986 intptr_t oprsz, bool after)
3987 {
3988 bool brk = false;
3989 intptr_t i;
3990
3991 for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
3992 uint64_t this_b, this_g = g[i];
3993
3994 brk = compute_brk(&this_b, n[i], this_g, brk, after);
3995 d[i] = this_b & this_g;
3996 }
3997 }
3998
3999 /* Likewise, but also compute flags. */
4000 static uint32_t compute_brks_z(uint64_t *d, uint64_t *n, uint64_t *g,
4001 intptr_t oprsz, bool after)
4002 {
4003 uint32_t flags = PREDTEST_INIT;
4004 bool brk = false;
4005 intptr_t i;
4006
4007 for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
4008 uint64_t this_b, this_d, this_g = g[i];
4009
4010 brk = compute_brk(&this_b, n[i], this_g, brk, after);
4011 d[i] = this_d = this_b & this_g;
4012 flags = iter_predtest_fwd(this_d, this_g, flags);
4013 }
4014 return flags;
4015 }
4016
4017 /* Compute a merging BRK. */
4018 static void compute_brk_m(uint64_t *d, uint64_t *n, uint64_t *g,
4019 intptr_t oprsz, bool after)
4020 {
4021 bool brk = false;
4022 intptr_t i;
4023
4024 for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
4025 uint64_t this_b, this_g = g[i];
4026
4027 brk = compute_brk(&this_b, n[i], this_g, brk, after);
4028 d[i] = (this_b & this_g) | (d[i] & ~this_g);
4029 }
4030 }
4031
4032 /* Likewise, but also compute flags. */
4033 static uint32_t compute_brks_m(uint64_t *d, uint64_t *n, uint64_t *g,
4034 intptr_t oprsz, bool after)
4035 {
4036 uint32_t flags = PREDTEST_INIT;
4037 bool brk = false;
4038 intptr_t i;
4039
4040 for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
4041 uint64_t this_b, this_d = d[i], this_g = g[i];
4042
4043 brk = compute_brk(&this_b, n[i], this_g, brk, after);
4044 d[i] = this_d = (this_b & this_g) | (this_d & ~this_g);
4045 flags = iter_predtest_fwd(this_d, this_g, flags);
4046 }
4047 return flags;
4048 }
4049
4050 static uint32_t do_zero(ARMPredicateReg *d, intptr_t oprsz)
4051 {
4052 /* It is quicker to zero the whole predicate than to loop over OPRSZ.
4053 * The compiler should turn this into 4 64-bit integer stores.
4054 */
4055 memset(d, 0, sizeof(ARMPredicateReg));
4056 return PREDTEST_INIT;
4057 }
4058
4059 void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg,
4060 uint32_t pred_desc)
4061 {
4062 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4063 if (last_active_pred(vn, vg, oprsz)) {
4064 compute_brk_z(vd, vm, vg, oprsz, true);
4065 } else {
4066 do_zero(vd, oprsz);
4067 }
4068 }
4069
4070 uint32_t HELPER(sve_brkpas)(void *vd, void *vn, void *vm, void *vg,
4071 uint32_t pred_desc)
4072 {
4073 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4074 if (last_active_pred(vn, vg, oprsz)) {
4075 return compute_brks_z(vd, vm, vg, oprsz, true);
4076 } else {
4077 return do_zero(vd, oprsz);
4078 }
4079 }
4080
4081 void HELPER(sve_brkpb)(void *vd, void *vn, void *vm, void *vg,
4082 uint32_t pred_desc)
4083 {
4084 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4085 if (last_active_pred(vn, vg, oprsz)) {
4086 compute_brk_z(vd, vm, vg, oprsz, false);
4087 } else {
4088 do_zero(vd, oprsz);
4089 }
4090 }
4091
4092 uint32_t HELPER(sve_brkpbs)(void *vd, void *vn, void *vm, void *vg,
4093 uint32_t pred_desc)
4094 {
4095 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4096 if (last_active_pred(vn, vg, oprsz)) {
4097 return compute_brks_z(vd, vm, vg, oprsz, false);
4098 } else {
4099 return do_zero(vd, oprsz);
4100 }
4101 }
4102
4103 void HELPER(sve_brka_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4104 {
4105 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4106 compute_brk_z(vd, vn, vg, oprsz, true);
4107 }
4108
4109 uint32_t HELPER(sve_brkas_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4110 {
4111 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4112 return compute_brks_z(vd, vn, vg, oprsz, true);
4113 }
4114
4115 void HELPER(sve_brkb_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4116 {
4117 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4118 compute_brk_z(vd, vn, vg, oprsz, false);
4119 }
4120
4121 uint32_t HELPER(sve_brkbs_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4122 {
4123 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4124 return compute_brks_z(vd, vn, vg, oprsz, false);
4125 }
4126
4127 void HELPER(sve_brka_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4128 {
4129 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4130 compute_brk_m(vd, vn, vg, oprsz, true);
4131 }
4132
4133 uint32_t HELPER(sve_brkas_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4134 {
4135 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4136 return compute_brks_m(vd, vn, vg, oprsz, true);
4137 }
4138
4139 void HELPER(sve_brkb_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4140 {
4141 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4142 compute_brk_m(vd, vn, vg, oprsz, false);
4143 }
4144
4145 uint32_t HELPER(sve_brkbs_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4146 {
4147 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4148 return compute_brks_m(vd, vn, vg, oprsz, false);
4149 }
4150
4151 void HELPER(sve_brkn)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4152 {
4153 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4154 if (!last_active_pred(vn, vg, oprsz)) {
4155 do_zero(vd, oprsz);
4156 }
4157 }
4158
4159 /* As if PredTest(Ones(PL), D, esz). */
4160 static uint32_t predtest_ones(ARMPredicateReg *d, intptr_t oprsz,
4161 uint64_t esz_mask)
4162 {
4163 uint32_t flags = PREDTEST_INIT;
4164 intptr_t i;
4165
4166 for (i = 0; i < oprsz / 8; i++) {
4167 flags = iter_predtest_fwd(d->p[i], esz_mask, flags);
4168 }
4169 if (oprsz & 7) {
4170 uint64_t mask = ~(-1ULL << (8 * (oprsz & 7)));
4171 flags = iter_predtest_fwd(d->p[i], esz_mask & mask, flags);
4172 }
4173 return flags;
4174 }
4175
4176 uint32_t HELPER(sve_brkns)(void *vd, void *vn, void *vg, uint32_t pred_desc)
4177 {
4178 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4179 if (last_active_pred(vn, vg, oprsz)) {
4180 return predtest_ones(vd, oprsz, -1);
4181 } else {
4182 return do_zero(vd, oprsz);
4183 }
4184 }
4185
4186 uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc)
4187 {
4188 intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
4189 intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
4190 uint64_t *n = vn, *g = vg, sum = 0, mask = pred_esz_masks[esz];
4191 intptr_t i;
4192
4193 for (i = 0; i < words; ++i) {
4194 uint64_t t = n[i] & g[i] & mask;
4195 sum += ctpop64(t);
4196 }
4197 return sum;
4198 }
4199
4200 uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc)
4201 {
4202 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4203 intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
4204 uint64_t esz_mask = pred_esz_masks[esz];
4205 ARMPredicateReg *d = vd;
4206 uint32_t flags;
4207 intptr_t i;
4208
4209 /* Begin with a zero predicate register. */
4210 flags = do_zero(d, oprsz);
4211 if (count == 0) {
4212 return flags;
4213 }
4214
4215 /* Set all of the requested bits. */
4216 for (i = 0; i < count / 64; ++i) {
4217 d->p[i] = esz_mask;
4218 }
4219 if (count & 63) {
4220 d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask;
4221 }
4222
4223 return predtest_ones(d, oprsz, esz_mask);
4224 }
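/*
 * For example, count == 5 with esz == 0 sets d->p[0] = 0x1f (the first
 * five byte elements active); count == 6 with esz == 1 sets
 * d->p[0] = MAKE_64BIT_MASK(0, 6) & 0x5555555555555555ull == 0x15,
 * i.e. the first three halfword elements active.
 */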
4225
4226 uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc)
4227 {
4228 intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
4229 intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
4230 uint64_t esz_mask = pred_esz_masks[esz];
4231 ARMPredicateReg *d = vd;
4232 intptr_t i, invcount, oprbits;
4233 uint64_t bits;
4234
4235 if (count == 0) {
4236 return do_zero(d, oprsz);
4237 }
4238
4239 oprbits = oprsz * 8;
4240 tcg_debug_assert(count <= oprbits);
4241
4242 bits = esz_mask;
4243 if (oprbits & 63) {
4244 bits &= MAKE_64BIT_MASK(0, oprbits & 63);
4245 }
4246
4247 invcount = oprbits - count;
4248 for (i = (oprsz - 1) / 8; i > invcount / 64; --i) {
4249 d->p[i] = bits;
4250 bits = esz_mask;
4251 }
4252
4253 d->p[i] = bits & MAKE_64BIT_MASK(invcount & 63, 64);
4254
4255 while (--i >= 0) {
4256 d->p[i] = 0;
4257 }
4258
4259 return predtest_ones(d, oprsz, esz_mask);
4260 }
4261
4262 /* Recursive reduction on a function;
4263 * Cf. the ARM ARM function ReducePredicated.
4264 *
4265 * While it would be possible to write this without the DATA temporary,
4266 * it is much simpler to process the predicate register this way.
4267 * The recursion is bounded to depth 7 (128 fp16 elements), so there's
4268 * little to gain with a more complex non-recursive form.
4269 */
4270 #define DO_REDUCE(NAME, TYPE, H, FUNC, IDENT) \
4271 static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \
4272 { \
4273 if (n == 1) { \
4274 return *data; \
4275 } else { \
4276 uintptr_t half = n / 2; \
4277 TYPE lo = NAME##_reduce(data, status, half); \
4278 TYPE hi = NAME##_reduce(data + half, status, half); \
4279 return TYPE##_##FUNC(lo, hi, status); \
4280 } \
4281 } \
4282 uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \
4283 { \
4284 uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_data(desc); \
4285 TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \
4286 for (i = 0; i < oprsz; ) { \
4287 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
4288 do { \
4289 TYPE nn = *(TYPE *)(vn + H(i)); \
4290 *(TYPE *)((void *)data + i) = (pg & 1 ? nn : IDENT); \
4291 i += sizeof(TYPE), pg >>= sizeof(TYPE); \
4292 } while (i & 15); \
4293 } \
4294 for (; i < maxsz; i += sizeof(TYPE)) { \
4295 *(TYPE *)((void *)data + i) = IDENT; \
4296 } \
4297 return NAME##_reduce(data, vs, maxsz / sizeof(TYPE)); \
4298 }
4299
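/*
 * With eight elements the recursion above evaluates
 *     ((d0 op d1) op (d2 op d3)) op ((d4 op d5) op (d6 op d7)),
 * a balanced tree of depth 3.  Inactive elements and the tail up to
 * MAXSZ are filled with IDENT, so they do not disturb the result.
 */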
4300 DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero)
4301 DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero)
4302 DO_REDUCE(sve_faddv_d, float64, , add, float64_zero)
4303
4304 /* Identity is floatN_default_nan, without the function call. */
4305 DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00)
4306 DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000)
4307 DO_REDUCE(sve_fminnmv_d, float64, , minnum, 0x7FF8000000000000ULL)
4308
4309 DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00)
4310 DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000)
4311 DO_REDUCE(sve_fmaxnmv_d, float64, , maxnum, 0x7FF8000000000000ULL)
4312
4313 DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity)
4314 DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity)
4315 DO_REDUCE(sve_fminv_d, float64, , min, float64_infinity)
4316
4317 DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity))
4318 DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity))
4319 DO_REDUCE(sve_fmaxv_d, float64, , max, float64_chs(float64_infinity))
4320
4321 #undef DO_REDUCE
4322
4323 uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg,
4324 void *status, uint32_t desc)
4325 {
4326 intptr_t i = 0, opr_sz = simd_oprsz(desc);
4327 float16 result = nn;
4328
4329 do {
4330 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
4331 do {
4332 if (pg & 1) {
4333 float16 mm = *(float16 *)(vm + H1_2(i));
4334 result = float16_add(result, mm, status);
4335 }
4336 i += sizeof(float16), pg >>= sizeof(float16);
4337 } while (i & 15);
4338 } while (i < opr_sz);
4339
4340 return result;
4341 }
4342
4343 uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg,
4344 void *status, uint32_t desc)
4345 {
4346 intptr_t i = 0, opr_sz = simd_oprsz(desc);
4347 float32 result = nn;
4348
4349 do {
4350 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
4351 do {
4352 if (pg & 1) {
4353 float32 mm = *(float32 *)(vm + H1_2(i));
4354 result = float32_add(result, mm, status);
4355 }
4356 i += sizeof(float32), pg >>= sizeof(float32);
4357 } while (i & 15);
4358 } while (i < opr_sz);
4359
4360 return result;
4361 }
4362
4363 uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg,
4364 void *status, uint32_t desc)
4365 {
4366 intptr_t i = 0, opr_sz = simd_oprsz(desc) / 8;
4367 uint64_t *m = vm;
4368 uint8_t *pg = vg;
4369
4370 for (i = 0; i < opr_sz; i++) {
4371 if (pg[H1(i)] & 1) {
4372 nn = float64_add(nn, m[i], status);
4373 }
4374 }
4375
4376 return nn;
4377 }
4378
4379 /* Fully general three-operand expander, controlled by a predicate,
4380 * with the extra float_status parameter.
4381 */
4382 #define DO_ZPZZ_FP(NAME, TYPE, H, OP) \
4383 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
4384 void *status, uint32_t desc) \
4385 { \
4386 intptr_t i = simd_oprsz(desc); \
4387 uint64_t *g = vg; \
4388 do { \
4389 uint64_t pg = g[(i - 1) >> 6]; \
4390 do { \
4391 i -= sizeof(TYPE); \
4392 if (likely((pg >> (i & 63)) & 1)) { \
4393 TYPE nn = *(TYPE *)(vn + H(i)); \
4394 TYPE mm = *(TYPE *)(vm + H(i)); \
4395 *(TYPE *)(vd + H(i)) = OP(nn, mm, status); \
4396 } \
4397 } while (i & 63); \
4398 } while (i != 0); \
4399 }
4400
4401 DO_ZPZZ_FP(sve_fadd_h, uint16_t, H1_2, float16_add)
4402 DO_ZPZZ_FP(sve_fadd_s, uint32_t, H1_4, float32_add)
4403 DO_ZPZZ_FP(sve_fadd_d, uint64_t, , float64_add)
4404
4405 DO_ZPZZ_FP(sve_fsub_h, uint16_t, H1_2, float16_sub)
4406 DO_ZPZZ_FP(sve_fsub_s, uint32_t, H1_4, float32_sub)
4407 DO_ZPZZ_FP(sve_fsub_d, uint64_t, , float64_sub)
4408
4409 DO_ZPZZ_FP(sve_fmul_h, uint16_t, H1_2, float16_mul)
4410 DO_ZPZZ_FP(sve_fmul_s, uint32_t, H1_4, float32_mul)
4411 DO_ZPZZ_FP(sve_fmul_d, uint64_t, , float64_mul)
4412
4413 DO_ZPZZ_FP(sve_fdiv_h, uint16_t, H1_2, float16_div)
4414 DO_ZPZZ_FP(sve_fdiv_s, uint32_t, H1_4, float32_div)
4415 DO_ZPZZ_FP(sve_fdiv_d, uint64_t, , float64_div)
4416
4417 DO_ZPZZ_FP(sve_fmin_h, uint16_t, H1_2, float16_min)
4418 DO_ZPZZ_FP(sve_fmin_s, uint32_t, H1_4, float32_min)
4419 DO_ZPZZ_FP(sve_fmin_d, uint64_t, , float64_min)
4420
4421 DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max)
4422 DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max)
4423 DO_ZPZZ_FP(sve_fmax_d, uint64_t, , float64_max)
4424
4425 DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum)
4426 DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum)
4427 DO_ZPZZ_FP(sve_fminnum_d, uint64_t, , float64_minnum)
4428
4429 DO_ZPZZ_FP(sve_fmaxnum_h, uint16_t, H1_2, float16_maxnum)
4430 DO_ZPZZ_FP(sve_fmaxnum_s, uint32_t, H1_4, float32_maxnum)
4431 DO_ZPZZ_FP(sve_fmaxnum_d, uint64_t, , float64_maxnum)
4432
4433 static inline float16 abd_h(float16 a, float16 b, float_status *s)
4434 {
4435 return float16_abs(float16_sub(a, b, s));
4436 }
4437
4438 static inline float32 abd_s(float32 a, float32 b, float_status *s)
4439 {
4440 return float32_abs(float32_sub(a, b, s));
4441 }
4442
4443 static inline float64 abd_d(float64 a, float64 b, float_status *s)
4444 {
4445 return float64_abs(float64_sub(a, b, s));
4446 }
4447
4448 DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h)
4449 DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s)
4450 DO_ZPZZ_FP(sve_fabd_d, uint64_t, , abd_d)
4451
4452 static inline float64 scalbn_d(float64 a, int64_t b, float_status *s)
4453 {
4454 int b_int = MIN(MAX(b, INT_MIN), INT_MAX);
4455 return float64_scalbn(a, b_int, s);
4456 }
4457
4458 DO_ZPZZ_FP(sve_fscalbn_h, int16_t, H1_2, float16_scalbn)
4459 DO_ZPZZ_FP(sve_fscalbn_s, int32_t, H1_4, float32_scalbn)
4460 DO_ZPZZ_FP(sve_fscalbn_d, int64_t, , scalbn_d)
4461
4462 DO_ZPZZ_FP(sve_fmulx_h, uint16_t, H1_2, helper_advsimd_mulxh)
4463 DO_ZPZZ_FP(sve_fmulx_s, uint32_t, H1_4, helper_vfp_mulxs)
4464 DO_ZPZZ_FP(sve_fmulx_d, uint64_t, , helper_vfp_mulxd)
4465
4466 #undef DO_ZPZZ_FP
4467
4468 /* Three-operand expander, with one scalar operand, controlled by
4469 * a predicate, with the extra float_status parameter.
4470 */
4471 #define DO_ZPZS_FP(NAME, TYPE, H, OP) \
4472 void HELPER(NAME)(void *vd, void *vn, void *vg, uint64_t scalar, \
4473 void *status, uint32_t desc) \
4474 { \
4475 intptr_t i = simd_oprsz(desc); \
4476 uint64_t *g = vg; \
4477 TYPE mm = scalar; \
4478 do { \
4479 uint64_t pg = g[(i - 1) >> 6]; \
4480 do { \
4481 i -= sizeof(TYPE); \
4482 if (likely((pg >> (i & 63)) & 1)) { \
4483 TYPE nn = *(TYPE *)(vn + H(i)); \
4484 *(TYPE *)(vd + H(i)) = OP(nn, mm, status); \
4485 } \
4486 } while (i & 63); \
4487 } while (i != 0); \
4488 }
4489
4490 DO_ZPZS_FP(sve_fadds_h, float16, H1_2, float16_add)
4491 DO_ZPZS_FP(sve_fadds_s, float32, H1_4, float32_add)
4492 DO_ZPZS_FP(sve_fadds_d, float64, , float64_add)
4493
4494 DO_ZPZS_FP(sve_fsubs_h, float16, H1_2, float16_sub)
4495 DO_ZPZS_FP(sve_fsubs_s, float32, H1_4, float32_sub)
4496 DO_ZPZS_FP(sve_fsubs_d, float64, , float64_sub)
4497
4498 DO_ZPZS_FP(sve_fmuls_h, float16, H1_2, float16_mul)
4499 DO_ZPZS_FP(sve_fmuls_s, float32, H1_4, float32_mul)
4500 DO_ZPZS_FP(sve_fmuls_d, float64, , float64_mul)
4501
4502 static inline float16 subr_h(float16 a, float16 b, float_status *s)
4503 {
4504 return float16_sub(b, a, s);
4505 }
4506
4507 static inline float32 subr_s(float32 a, float32 b, float_status *s)
4508 {
4509 return float32_sub(b, a, s);
4510 }
4511
4512 static inline float64 subr_d(float64 a, float64 b, float_status *s)
4513 {
4514 return float64_sub(b, a, s);
4515 }
4516
4517 DO_ZPZS_FP(sve_fsubrs_h, float16, H1_2, subr_h)
4518 DO_ZPZS_FP(sve_fsubrs_s, float32, H1_4, subr_s)
4519 DO_ZPZS_FP(sve_fsubrs_d, float64, , subr_d)
4520
4521 DO_ZPZS_FP(sve_fmaxnms_h, float16, H1_2, float16_maxnum)
4522 DO_ZPZS_FP(sve_fmaxnms_s, float32, H1_4, float32_maxnum)
4523 DO_ZPZS_FP(sve_fmaxnms_d, float64, , float64_maxnum)
4524
4525 DO_ZPZS_FP(sve_fminnms_h, float16, H1_2, float16_minnum)
4526 DO_ZPZS_FP(sve_fminnms_s, float32, H1_4, float32_minnum)
4527 DO_ZPZS_FP(sve_fminnms_d, float64, , float64_minnum)
4528
4529 DO_ZPZS_FP(sve_fmaxs_h, float16, H1_2, float16_max)
4530 DO_ZPZS_FP(sve_fmaxs_s, float32, H1_4, float32_max)
4531 DO_ZPZS_FP(sve_fmaxs_d, float64, , float64_max)
4532
4533 DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min)
4534 DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min)
4535 DO_ZPZS_FP(sve_fmins_d, float64, , float64_min)
4536
4537 /* Fully general two-operand expander, controlled by a predicate,
4538 * with the extra float_status parameter.
4539 */
4540 #define DO_ZPZ_FP(NAME, TYPE, H, OP) \
4541 void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
4542 { \
4543 intptr_t i = simd_oprsz(desc); \
4544 uint64_t *g = vg; \
4545 do { \
4546 uint64_t pg = g[(i - 1) >> 6]; \
4547 do { \
4548 i -= sizeof(TYPE); \
4549 if (likely((pg >> (i & 63)) & 1)) { \
4550 TYPE nn = *(TYPE *)(vn + H(i)); \
4551 *(TYPE *)(vd + H(i)) = OP(nn, status); \
4552 } \
4553 } while (i & 63); \
4554 } while (i != 0); \
4555 }
4556
4557 /* SVE fp16 conversions always use IEEE mode. Like AdvSIMD, they ignore
4558 * FZ16. When converting from fp16, this affects flushing input denormals;
4559 * when converting to fp16, this affects flushing output denormals.
4560 */
4561 static inline float32 sve_f16_to_f32(float16 f, float_status *fpst)
4562 {
4563 bool save = get_flush_inputs_to_zero(fpst);
4564 float32 ret;
4565
4566 set_flush_inputs_to_zero(false, fpst);
4567 ret = float16_to_float32(f, true, fpst);
4568 set_flush_inputs_to_zero(save, fpst);
4569 return ret;
4570 }
4571
4572 static inline float64 sve_f16_to_f64(float16 f, float_status *fpst)
4573 {
4574 bool save = get_flush_inputs_to_zero(fpst);
4575 float64 ret;
4576
4577 set_flush_inputs_to_zero(false, fpst);
4578 ret = float16_to_float64(f, true, fpst);
4579 set_flush_inputs_to_zero(save, fpst);
4580 return ret;
4581 }
4582
4583 static inline float16 sve_f32_to_f16(float32 f, float_status *fpst)
4584 {
4585 bool save = get_flush_to_zero(fpst);
4586 float16 ret;
4587
4588 set_flush_to_zero(false, fpst);
4589 ret = float32_to_float16(f, true, fpst);
4590 set_flush_to_zero(save, fpst);
4591 return ret;
4592 }
4593
4594 static inline float16 sve_f64_to_f16(float64 f, float_status *fpst)
4595 {
4596 bool save = get_flush_to_zero(fpst);
4597 float16 ret;
4598
4599 set_flush_to_zero(false, fpst);
4600 ret = float64_to_float16(f, true, fpst);
4601 set_flush_to_zero(save, fpst);
4602 return ret;
4603 }
4604
4605 static inline int16_t vfp_float16_to_int16_rtz(float16 f, float_status *s)
4606 {
4607 if (float16_is_any_nan(f)) {
4608 float_raise(float_flag_invalid, s);
4609 return 0;
4610 }
4611 return float16_to_int16_round_to_zero(f, s);
4612 }
4613
4614 static inline int64_t vfp_float16_to_int64_rtz(float16 f, float_status *s)
4615 {
4616 if (float16_is_any_nan(f)) {
4617 float_raise(float_flag_invalid, s);
4618 return 0;
4619 }
4620 return float16_to_int64_round_to_zero(f, s);
4621 }
4622
4623 static inline int64_t vfp_float32_to_int64_rtz(float32 f, float_status *s)
4624 {
4625 if (float32_is_any_nan(f)) {
4626 float_raise(float_flag_invalid, s);
4627 return 0;
4628 }
4629 return float32_to_int64_round_to_zero(f, s);
4630 }
4631
4632 static inline int64_t vfp_float64_to_int64_rtz(float64 f, float_status *s)
4633 {
4634 if (float64_is_any_nan(f)) {
4635 float_raise(float_flag_invalid, s);
4636 return 0;
4637 }
4638 return float64_to_int64_round_to_zero(f, s);
4639 }
4640
4641 static inline uint16_t vfp_float16_to_uint16_rtz(float16 f, float_status *s)
4642 {
4643 if (float16_is_any_nan(f)) {
4644 float_raise(float_flag_invalid, s);
4645 return 0;
4646 }
4647 return float16_to_uint16_round_to_zero(f, s);
4648 }
4649
4650 static inline uint64_t vfp_float16_to_uint64_rtz(float16 f, float_status *s)
4651 {
4652 if (float16_is_any_nan(f)) {
4653 float_raise(float_flag_invalid, s);
4654 return 0;
4655 }
4656 return float16_to_uint64_round_to_zero(f, s);
4657 }
4658
4659 static inline uint64_t vfp_float32_to_uint64_rtz(float32 f, float_status *s)
4660 {
4661 if (float32_is_any_nan(f)) {
4662 float_raise(float_flag_invalid, s);
4663 return 0;
4664 }
4665 return float32_to_uint64_round_to_zero(f, s);
4666 }
4667
4668 static inline uint64_t vfp_float64_to_uint64_rtz(float64 f, float_status *s)
4669 {
4670 if (float64_is_any_nan(f)) {
4671 float_raise(float_flag_invalid, s);
4672 return 0;
4673 }
4674 return float64_to_uint64_round_to_zero(f, s);
4675 }
4676
4677 DO_ZPZ_FP(sve_fcvt_sh, uint32_t, H1_4, sve_f32_to_f16)
4678 DO_ZPZ_FP(sve_fcvt_hs, uint32_t, H1_4, sve_f16_to_f32)
4679 DO_ZPZ_FP(sve_fcvt_dh, uint64_t, , sve_f64_to_f16)
4680 DO_ZPZ_FP(sve_fcvt_hd, uint64_t, , sve_f16_to_f64)
4681 DO_ZPZ_FP(sve_fcvt_ds, uint64_t, , float64_to_float32)
4682 DO_ZPZ_FP(sve_fcvt_sd, uint64_t, , float32_to_float64)
4683
4684 DO_ZPZ_FP(sve_fcvtzs_hh, uint16_t, H1_2, vfp_float16_to_int16_rtz)
4685 DO_ZPZ_FP(sve_fcvtzs_hs, uint32_t, H1_4, helper_vfp_tosizh)
4686 DO_ZPZ_FP(sve_fcvtzs_ss, uint32_t, H1_4, helper_vfp_tosizs)
4687 DO_ZPZ_FP(sve_fcvtzs_hd, uint64_t, , vfp_float16_to_int64_rtz)
4688 DO_ZPZ_FP(sve_fcvtzs_sd, uint64_t, , vfp_float32_to_int64_rtz)
4689 DO_ZPZ_FP(sve_fcvtzs_ds, uint64_t, , helper_vfp_tosizd)
4690 DO_ZPZ_FP(sve_fcvtzs_dd, uint64_t, , vfp_float64_to_int64_rtz)
4691
4692 DO_ZPZ_FP(sve_fcvtzu_hh, uint16_t, H1_2, vfp_float16_to_uint16_rtz)
4693 DO_ZPZ_FP(sve_fcvtzu_hs, uint32_t, H1_4, helper_vfp_touizh)
4694 DO_ZPZ_FP(sve_fcvtzu_ss, uint32_t, H1_4, helper_vfp_touizs)
4695 DO_ZPZ_FP(sve_fcvtzu_hd, uint64_t, , vfp_float16_to_uint64_rtz)
4696 DO_ZPZ_FP(sve_fcvtzu_sd, uint64_t, , vfp_float32_to_uint64_rtz)
4697 DO_ZPZ_FP(sve_fcvtzu_ds, uint64_t, , helper_vfp_touizd)
4698 DO_ZPZ_FP(sve_fcvtzu_dd, uint64_t, , vfp_float64_to_uint64_rtz)
4699
4700 DO_ZPZ_FP(sve_frint_h, uint16_t, H1_2, helper_advsimd_rinth)
4701 DO_ZPZ_FP(sve_frint_s, uint32_t, H1_4, helper_rints)
4702 DO_ZPZ_FP(sve_frint_d, uint64_t, , helper_rintd)
4703
4704 DO_ZPZ_FP(sve_frintx_h, uint16_t, H1_2, float16_round_to_int)
4705 DO_ZPZ_FP(sve_frintx_s, uint32_t, H1_4, float32_round_to_int)
4706 DO_ZPZ_FP(sve_frintx_d, uint64_t, , float64_round_to_int)
4707
4708 DO_ZPZ_FP(sve_frecpx_h, uint16_t, H1_2, helper_frecpx_f16)
4709 DO_ZPZ_FP(sve_frecpx_s, uint32_t, H1_4, helper_frecpx_f32)
4710 DO_ZPZ_FP(sve_frecpx_d, uint64_t, , helper_frecpx_f64)
4711
4712 DO_ZPZ_FP(sve_fsqrt_h, uint16_t, H1_2, float16_sqrt)
4713 DO_ZPZ_FP(sve_fsqrt_s, uint32_t, H1_4, float32_sqrt)
4714 DO_ZPZ_FP(sve_fsqrt_d, uint64_t, , float64_sqrt)
4715
4716 DO_ZPZ_FP(sve_scvt_hh, uint16_t, H1_2, int16_to_float16)
4717 DO_ZPZ_FP(sve_scvt_sh, uint32_t, H1_4, int32_to_float16)
4718 DO_ZPZ_FP(sve_scvt_ss, uint32_t, H1_4, int32_to_float32)
4719 DO_ZPZ_FP(sve_scvt_sd, uint64_t, , int32_to_float64)
4720 DO_ZPZ_FP(sve_scvt_dh, uint64_t, , int64_to_float16)
4721 DO_ZPZ_FP(sve_scvt_ds, uint64_t, , int64_to_float32)
4722 DO_ZPZ_FP(sve_scvt_dd, uint64_t, , int64_to_float64)
4723
4724 DO_ZPZ_FP(sve_ucvt_hh, uint16_t, H1_2, uint16_to_float16)
4725 DO_ZPZ_FP(sve_ucvt_sh, uint32_t, H1_4, uint32_to_float16)
4726 DO_ZPZ_FP(sve_ucvt_ss, uint32_t, H1_4, uint32_to_float32)
4727 DO_ZPZ_FP(sve_ucvt_sd, uint64_t, , uint32_to_float64)
4728 DO_ZPZ_FP(sve_ucvt_dh, uint64_t, , uint64_to_float16)
4729 DO_ZPZ_FP(sve_ucvt_ds, uint64_t, , uint64_to_float32)
4730 DO_ZPZ_FP(sve_ucvt_dd, uint64_t, , uint64_to_float64)
4731
4732 #undef DO_ZPZ_FP
4733
4734 static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg,
4735 float_status *status, uint32_t desc,
4736 uint16_t neg1, uint16_t neg3)
4737 {
4738 intptr_t i = simd_oprsz(desc);
4739 uint64_t *g = vg;
4740
4741 do {
4742 uint64_t pg = g[(i - 1) >> 6];
4743 do {
4744 i -= 2;
4745 if (likely((pg >> (i & 63)) & 1)) {
4746 float16 e1, e2, e3, r;
4747
4748 e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1;
4749 e2 = *(uint16_t *)(vm + H1_2(i));
4750 e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3;
4751 r = float16_muladd(e1, e2, e3, 0, status);
4752 *(uint16_t *)(vd + H1_2(i)) = r;
4753 }
4754 } while (i & 63);
4755 } while (i != 0);
4756 }
4757
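/*
 * NEG1 and NEG3 carry the sign bit of the element type, so XOR-ing them
 * in negates the first and/or third operand and lets the four flavours
 * below share one body:
 *   FMLA  (0, 0):         n * m + a
 *   FMLS  (sign, 0):     -n * m + a
 *   FNMLA (sign, sign):  -n * m - a
 *   FNMLS (0, sign):      n * m - a
 */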
4758 void HELPER(sve_fmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
4759 void *vg, void *status, uint32_t desc)
4760 {
4761 do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0);
4762 }
4763
4764 void HELPER(sve_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
4765 void *vg, void *status, uint32_t desc)
4766 {
4767 do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0);
4768 }
4769
4770 void HELPER(sve_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
4771 void *vg, void *status, uint32_t desc)
4772 {
4773 do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000);
4774 }
4775
4776 void HELPER(sve_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
4777 void *vg, void *status, uint32_t desc)
4778 {
4779 do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000);
4780 }
4781
4782 static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg,
4783 float_status *status, uint32_t desc,
4784 uint32_t neg1, uint32_t neg3)
4785 {
4786 intptr_t i = simd_oprsz(desc);
4787 uint64_t *g = vg;
4788
4789 do {
4790 uint64_t pg = g[(i - 1) >> 6];
4791 do {
4792 i -= 4;
4793 if (likely((pg >> (i & 63)) & 1)) {
4794 float32 e1, e2, e3, r;
4795
4796 e1 = *(uint32_t *)(vn + H1_4(i)) ^ neg1;
4797 e2 = *(uint32_t *)(vm + H1_4(i));
4798 e3 = *(uint32_t *)(va + H1_4(i)) ^ neg3;
4799 r = float32_muladd(e1, e2, e3, 0, status);
4800 *(uint32_t *)(vd + H1_4(i)) = r;
4801 }
4802 } while (i & 63);
4803 } while (i != 0);
4804 }
4805
4806 void HELPER(sve_fmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
4807 void *vg, void *status, uint32_t desc)
4808 {
4809 do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0);
4810 }
4811
4812 void HELPER(sve_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
4813 void *vg, void *status, uint32_t desc)
4814 {
4815 do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0);
4816 }
4817
4818 void HELPER(sve_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
4819 void *vg, void *status, uint32_t desc)
4820 {
4821 do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000);
4822 }
4823
4824 void HELPER(sve_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
4825 void *vg, void *status, uint32_t desc)
4826 {
4827 do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000);
4828 }
4829
4830 static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg,
4831 float_status *status, uint32_t desc,
4832 uint64_t neg1, uint64_t neg3)
4833 {
4834 intptr_t i = simd_oprsz(desc);
4835 uint64_t *g = vg;
4836
4837 do {
4838 uint64_t pg = g[(i - 1) >> 6];
4839 do {
4840 i -= 8;
4841 if (likely((pg >> (i & 63)) & 1)) {
4842 float64 e1, e2, e3, r;
4843
4844 e1 = *(uint64_t *)(vn + i) ^ neg1;
4845 e2 = *(uint64_t *)(vm + i);
4846 e3 = *(uint64_t *)(va + i) ^ neg3;
4847 r = float64_muladd(e1, e2, e3, 0, status);
4848 *(uint64_t *)(vd + i) = r;
4849 }
4850 } while (i & 63);
4851 } while (i != 0);
4852 }
4853
4854 void HELPER(sve_fmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
4855 void *vg, void *status, uint32_t desc)
4856 {
4857 do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0);
4858 }
4859
4860 void HELPER(sve_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
4861 void *vg, void *status, uint32_t desc)
4862 {
4863 do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0);
4864 }
4865
4866 void HELPER(sve_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
4867 void *vg, void *status, uint32_t desc)
4868 {
4869 do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN);
4870 }
4871
4872 void HELPER(sve_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
4873 void *vg, void *status, uint32_t desc)
4874 {
4875 do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN);
4876 }
4877
4878 /* Two operand floating-point comparison controlled by a predicate.
4879 * Unlike the integer version, we are not allowed to optimistically
4880 * compare operands, since the comparison may have side effects wrt
4881 * the FPSR.
4882 */
4883 #define DO_FPCMP_PPZZ(NAME, TYPE, H, OP) \
4884 void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
4885 void *status, uint32_t desc) \
4886 { \
4887 intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \
4888 uint64_t *d = vd, *g = vg; \
4889 do { \
4890 uint64_t out = 0, pg = g[j]; \
4891 do { \
4892 i -= sizeof(TYPE), out <<= sizeof(TYPE); \
4893 if (likely((pg >> (i & 63)) & 1)) { \
4894 TYPE nn = *(TYPE *)(vn + H(i)); \
4895 TYPE mm = *(TYPE *)(vm + H(i)); \
4896 out |= OP(TYPE, nn, mm, status); \
4897 } \
4898 } while (i & 63); \
4899 d[j--] = out; \
4900 } while (i > 0); \
4901 }
4902
4903 #define DO_FPCMP_PPZZ_H(NAME, OP) \
4904 DO_FPCMP_PPZZ(NAME##_h, float16, H1_2, OP)
4905 #define DO_FPCMP_PPZZ_S(NAME, OP) \
4906 DO_FPCMP_PPZZ(NAME##_s, float32, H1_4, OP)
4907 #define DO_FPCMP_PPZZ_D(NAME, OP) \
4908 DO_FPCMP_PPZZ(NAME##_d, float64, , OP)
4909
4910 #define DO_FPCMP_PPZZ_ALL(NAME, OP) \
4911 DO_FPCMP_PPZZ_H(NAME, OP) \
4912 DO_FPCMP_PPZZ_S(NAME, OP) \
4913 DO_FPCMP_PPZZ_D(NAME, OP)
4914
4915 #define DO_FCMGE(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) <= 0
4916 #define DO_FCMGT(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) < 0
4917 #define DO_FCMLE(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) <= 0
4918 #define DO_FCMLT(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) < 0
4919 #define DO_FCMEQ(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) == 0
4920 #define DO_FCMNE(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) != 0
4921 #define DO_FCMUO(TYPE, X, Y, ST) \
4922 TYPE##_compare_quiet(X, Y, ST) == float_relation_unordered
4923 #define DO_FACGE(TYPE, X, Y, ST) \
4924 TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) <= 0
4925 #define DO_FACGT(TYPE, X, Y, ST) \
4926 TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) < 0
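/*
 * The swapped-operand forms above express X >= Y and X > Y as
 * compare(Y, X) <= 0 and compare(Y, X) < 0; float_relation_unordered
 * is positive, so NaN inputs leave every ordered predicate false while
 * the signalling compare still raises Invalid.  FACGE/FACGT compare
 * absolute values, and FCMEQ/FCMNE/FCMUO use the quiet compare so that
 * quiet NaNs do not raise Invalid.
 */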
4927
4928 DO_FPCMP_PPZZ_ALL(sve_fcmge, DO_FCMGE)
4929 DO_FPCMP_PPZZ_ALL(sve_fcmgt, DO_FCMGT)
4930 DO_FPCMP_PPZZ_ALL(sve_fcmeq, DO_FCMEQ)
4931 DO_FPCMP_PPZZ_ALL(sve_fcmne, DO_FCMNE)
4932 DO_FPCMP_PPZZ_ALL(sve_fcmuo, DO_FCMUO)
4933 DO_FPCMP_PPZZ_ALL(sve_facge, DO_FACGE)
4934 DO_FPCMP_PPZZ_ALL(sve_facgt, DO_FACGT)
4935
4936 #undef DO_FPCMP_PPZZ_ALL
4937 #undef DO_FPCMP_PPZZ_D
4938 #undef DO_FPCMP_PPZZ_S
4939 #undef DO_FPCMP_PPZZ_H
4940 #undef DO_FPCMP_PPZZ
4941
4942 /* One operand floating-point comparison against zero, controlled
4943 * by a predicate.
4944 */
4945 #define DO_FPCMP_PPZ0(NAME, TYPE, H, OP) \
4946 void HELPER(NAME)(void *vd, void *vn, void *vg, \
4947 void *status, uint32_t desc) \
4948 { \
4949 intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \
4950 uint64_t *d = vd, *g = vg; \
4951 do { \
4952 uint64_t out = 0, pg = g[j]; \
4953 do { \
4954 i -= sizeof(TYPE), out <<= sizeof(TYPE); \
4955 if ((pg >> (i & 63)) & 1) { \
4956 TYPE nn = *(TYPE *)(vn + H(i)); \
4957 out |= OP(TYPE, nn, 0, status); \
4958 } \
4959 } while (i & 63); \
4960 d[j--] = out; \
4961 } while (i > 0); \
4962 }
4963
4964 #define DO_FPCMP_PPZ0_H(NAME, OP) \
4965 DO_FPCMP_PPZ0(NAME##_h, float16, H1_2, OP)
4966 #define DO_FPCMP_PPZ0_S(NAME, OP) \
4967 DO_FPCMP_PPZ0(NAME##_s, float32, H1_4, OP)
4968 #define DO_FPCMP_PPZ0_D(NAME, OP) \
4969 DO_FPCMP_PPZ0(NAME##_d, float64, , OP)
4970
4971 #define DO_FPCMP_PPZ0_ALL(NAME, OP) \
4972 DO_FPCMP_PPZ0_H(NAME, OP) \
4973 DO_FPCMP_PPZ0_S(NAME, OP) \
4974 DO_FPCMP_PPZ0_D(NAME, OP)
4975
4976 DO_FPCMP_PPZ0_ALL(sve_fcmge0, DO_FCMGE)
4977 DO_FPCMP_PPZ0_ALL(sve_fcmgt0, DO_FCMGT)
4978 DO_FPCMP_PPZ0_ALL(sve_fcmle0, DO_FCMLE)
4979 DO_FPCMP_PPZ0_ALL(sve_fcmlt0, DO_FCMLT)
4980 DO_FPCMP_PPZ0_ALL(sve_fcmeq0, DO_FCMEQ)
4981 DO_FPCMP_PPZ0_ALL(sve_fcmne0, DO_FCMNE)
4982
4983 /* FP Trig Multiply-Add. */
4984
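/*
 * The coefficient tables in the helpers below appear to correspond to the
 * FTMAD definition: entries 0-7 hold the sine-series terms 1, -1/3!, 1/5!,
 * ... and entries 8-15 hold the cosine-series terms 1, -1/2!, 1/4!, ...
 * (the half- and single-precision tables simply truncate the series).
 * simd_data(desc) selects which term is used as the addend for this step;
 * a negative multiplicand in Zm selects the cosine half (index + 8) and is
 * used as its absolute value, so a chain of FTMAD instructions can build
 * up the polynomial approximations of sin and cos.
 */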
4985 void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
4986 {
4987 static const float16 coeff[16] = {
4988 0x3c00, 0xb155, 0x2030, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
4989 0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
4990 };
4991 intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16);
4992 intptr_t x = simd_data(desc);
4993 float16 *d = vd, *n = vn, *m = vm;
4994 for (i = 0; i < opr_sz; i++) {
4995 float16 mm = m[i];
4996 intptr_t xx = x;
4997 if (float16_is_neg(mm)) {
4998 mm = float16_abs(mm);
4999 xx += 8;
5000 }
5001 d[i] = float16_muladd(n[i], mm, coeff[xx], 0, vs);
5002 }
5003 }
5004
5005 void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
5006 {
5007 static const float32 coeff[16] = {
5008 0x3f800000, 0xbe2aaaab, 0x3c088886, 0xb95008b9,
5009 0x36369d6d, 0x00000000, 0x00000000, 0x00000000,
5010 0x3f800000, 0xbf000000, 0x3d2aaaa6, 0xbab60705,
5011 0x37cd37cc, 0x00000000, 0x00000000, 0x00000000,
5012 };
5013 intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32);
5014 intptr_t x = simd_data(desc);
5015 float32 *d = vd, *n = vn, *m = vm;
5016 for (i = 0; i < opr_sz; i++) {
5017 float32 mm = m[i];
5018 intptr_t xx = x;
5019 if (float32_is_neg(mm)) {
5020 mm = float32_abs(mm);
5021 xx += 8;
5022 }
5023 d[i] = float32_muladd(n[i], mm, coeff[xx], 0, vs);
5024 }
5025 }
5026
5027 void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
5028 {
5029 static const float64 coeff[16] = {
5030 0x3ff0000000000000ull, 0xbfc5555555555543ull,
5031 0x3f8111111110f30cull, 0xbf2a01a019b92fc6ull,
5032 0x3ec71de351f3d22bull, 0xbe5ae5e2b60f7b91ull,
5033 0x3de5d8408868552full, 0x0000000000000000ull,
5034 0x3ff0000000000000ull, 0xbfe0000000000000ull,
5035 0x3fa5555555555536ull, 0xbf56c16c16c13a0bull,
5036 0x3efa01a019b1e8d8ull, 0xbe927e4f7282f468ull,
5037 0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull,
5038 };
5039 intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64);
5040 intptr_t x = simd_data(desc);
5041 float64 *d = vd, *n = vn, *m = vm;
5042 for (i = 0; i < opr_sz; i++) {
5043 float64 mm = m[i];
5044 intptr_t xx = x;
5045 if (float64_is_neg(mm)) {
5046 mm = float64_abs(mm);
5047 xx += 8;
5048 }
5049 d[i] = float64_muladd(n[i], mm, coeff[xx], 0, vs);
5050 }
5051 }
5052
5053 /*
5054 * FP Complex Add
5055 */
5056
5057 void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg,
5058 void *vs, uint32_t desc)
5059 {
5060 intptr_t j, i = simd_oprsz(desc);
5061 uint64_t *g = vg;
5062 float16 neg_imag = float16_set_sign(0, simd_data(desc));
5063 float16 neg_real = float16_chs(neg_imag);
5064
5065 do {
5066 uint64_t pg = g[(i - 1) >> 6];
5067 do {
5068 float16 e0, e1, e2, e3;
5069
5070 /* I holds the real index; J holds the imag index. */
5071 j = i - sizeof(float16);
5072 i -= 2 * sizeof(float16);
5073
5074 e0 = *(float16 *)(vn + H1_2(i));
5075 e1 = *(float16 *)(vm + H1_2(j)) ^ neg_real;
5076 e2 = *(float16 *)(vn + H1_2(j));
5077 e3 = *(float16 *)(vm + H1_2(i)) ^ neg_imag;
5078
5079 if (likely((pg >> (i & 63)) & 1)) {
5080 *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, vs);
5081 }
5082 if (likely((pg >> (j & 63)) & 1)) {
5083 *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, vs);
5084 }
5085 } while (i & 63);
5086 } while (i != 0);
5087 }
5088
5089 void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg,
5090 void *vs, uint32_t desc)
5091 {
5092 intptr_t j, i = simd_oprsz(desc);
5093 uint64_t *g = vg;
5094 float32 neg_imag = float32_set_sign(0, simd_data(desc));
5095 float32 neg_real = float32_chs(neg_imag);
5096
5097 do {
5098 uint64_t pg = g[(i - 1) >> 6];
5099 do {
5100 float32 e0, e1, e2, e3;
5101
5102 /* I holds the real index; J holds the imag index. */
5103 j = i - sizeof(float32);
5104 i -= 2 * sizeof(float32);
5105
5106 e0 = *(float32 *)(vn + H1_2(i));
5107 e1 = *(float32 *)(vm + H1_2(j)) ^ neg_real;
5108 e2 = *(float32 *)(vn + H1_2(j));
5109 e3 = *(float32 *)(vm + H1_2(i)) ^ neg_imag;
5110
5111 if (likely((pg >> (i & 63)) & 1)) {
5112 *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, vs);
5113 }
5114 if (likely((pg >> (j & 63)) & 1)) {
5115 *(float32 *)(vd + H1_2(j)) = float32_add(e2, e3, vs);
5116 }
5117 } while (i & 63);
5118 } while (i != 0);
5119 }
5120
5121 void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg,
5122 void *vs, uint32_t desc)
5123 {
5124 intptr_t j, i = simd_oprsz(desc);
5125 uint64_t *g = vg;
5126 float64 neg_imag = float64_set_sign(0, simd_data(desc));
5127 float64 neg_real = float64_chs(neg_imag);
5128
5129 do {
5130 uint64_t pg = g[(i - 1) >> 6];
5131 do {
5132 float64 e0, e1, e2, e3;
5133
5134 /* I holds the real index; J holds the imag index. */
5135 j = i - sizeof(float64);
5136 i -= 2 * sizeof(float64);
5137
5138 e0 = *(float64 *)(vn + H1_2(i));
5139 e1 = *(float64 *)(vm + H1_2(j)) ^ neg_real;
5140 e2 = *(float64 *)(vn + H1_2(j));
5141 e3 = *(float64 *)(vm + H1_2(i)) ^ neg_imag;
5142
5143 if (likely((pg >> (i & 63)) & 1)) {
5144 *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, vs);
5145 }
5146 if (likely((pg >> (j & 63)) & 1)) {
5147 *(float64 *)(vd + H1_2(j)) = float64_add(e2, e3, vs);
5148 }
5149 } while (i & 63);
5150 } while (i != 0);
5151 }
5152
5153 /*
5154 * FP Complex Multiply
5155 */
5156
5157 void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
5158 void *vg, void *status, uint32_t desc)
5159 {
5160 intptr_t j, i = simd_oprsz(desc);
5161 unsigned rot = simd_data(desc);
5162 bool flip = rot & 1;
5163 float16 neg_imag, neg_real;
5164 uint64_t *g = vg;
5165
5166 neg_imag = float16_set_sign(0, (rot & 2) != 0);
5167 neg_real = float16_set_sign(0, rot == 1 || rot == 2);
5168
5169 do {
5170 uint64_t pg = g[(i - 1) >> 6];
5171 do {
5172 float16 e1, e2, e3, e4, nr, ni, mr, mi, d;
5173
5174 /* I holds the real index; J holds the imag index. */
5175 j = i - sizeof(float16);
5176 i -= 2 * sizeof(float16);
5177
5178 nr = *(float16 *)(vn + H1_2(i));
5179 ni = *(float16 *)(vn + H1_2(j));
5180 mr = *(float16 *)(vm + H1_2(i));
5181 mi = *(float16 *)(vm + H1_2(j));
5182
5183 e2 = (flip ? ni : nr);
5184 e1 = (flip ? mi : mr) ^ neg_real;
5185 e4 = e2;
5186 e3 = (flip ? mr : mi) ^ neg_imag;
5187
5188 if (likely((pg >> (i & 63)) & 1)) {
5189 d = *(float16 *)(va + H1_2(i));
5190 d = float16_muladd(e2, e1, d, 0, status);
5191 *(float16 *)(vd + H1_2(i)) = d;
5192 }
5193 if (likely((pg >> (j & 63)) & 1)) {
5194 d = *(float16 *)(va + H1_2(j));
5195 d = float16_muladd(e4, e3, d, 0, status);
5196 *(float16 *)(vd + H1_2(j)) = d;
5197 }
5198 } while (i & 63);
5199 } while (i != 0);
5200 }
5201
5202 void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
5203 void *vg, void *status, uint32_t desc)
5204 {
5205 intptr_t j, i = simd_oprsz(desc);
5206 unsigned rot = simd_data(desc);
5207 bool flip = rot & 1;
5208 float32 neg_imag, neg_real;
5209 uint64_t *g = vg;
5210
5211 neg_imag = float32_set_sign(0, (rot & 2) != 0);
5212 neg_real = float32_set_sign(0, rot == 1 || rot == 2);
5213
5214 do {
5215 uint64_t pg = g[(i - 1) >> 6];
5216 do {
5217 float32 e1, e2, e3, e4, nr, ni, mr, mi, d;
5218
5219 /* I holds the real index; J holds the imag index. */
5220 j = i - sizeof(float32);
5221 i -= 2 * sizeof(float32);
5222
5223 nr = *(float32 *)(vn + H1_2(i));
5224 ni = *(float32 *)(vn + H1_2(j));
5225 mr = *(float32 *)(vm + H1_2(i));
5226 mi = *(float32 *)(vm + H1_2(j));
5227
5228 e2 = (flip ? ni : nr);
5229 e1 = (flip ? mi : mr) ^ neg_real;
5230 e4 = e2;
5231 e3 = (flip ? mr : mi) ^ neg_imag;
5232
5233 if (likely((pg >> (i & 63)) & 1)) {
5234 d = *(float32 *)(va + H1_2(i));
5235 d = float32_muladd(e2, e1, d, 0, status);
5236 *(float32 *)(vd + H1_2(i)) = d;
5237 }
5238 if (likely((pg >> (j & 63)) & 1)) {
5239 d = *(float32 *)(va + H1_2(j));
5240 d = float32_muladd(e4, e3, d, 0, status);
5241 *(float32 *)(vd + H1_2(j)) = d;
5242 }
5243 } while (i & 63);
5244 } while (i != 0);
5245 }
5246
5247 void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
5248 void *vg, void *status, uint32_t desc)
5249 {
5250 intptr_t j, i = simd_oprsz(desc);
5251 unsigned rot = simd_data(desc);
5252 bool flip = rot & 1;
5253 float64 neg_imag, neg_real;
5254 uint64_t *g = vg;
5255
5256 neg_imag = float64_set_sign(0, (rot & 2) != 0);
5257 neg_real = float64_set_sign(0, rot == 1 || rot == 2);
5258
5259 do {
5260 uint64_t pg = g[(i - 1) >> 6];
5261 do {
5262 float64 e1, e2, e3, e4, nr, ni, mr, mi, d;
5263
5264 /* I holds the real index; J holds the imag index. */
5265 j = i - sizeof(float64);
5266 i -= 2 * sizeof(float64);
5267
5268 nr = *(float64 *)(vn + H1_2(i));
5269 ni = *(float64 *)(vn + H1_2(j));
5270 mr = *(float64 *)(vm + H1_2(i));
5271 mi = *(float64 *)(vm + H1_2(j));
5272
5273 e2 = (flip ? ni : nr);
5274 e1 = (flip ? mi : mr) ^ neg_real;
5275 e4 = e2;
5276 e3 = (flip ? mr : mi) ^ neg_imag;
5277
5278 if (likely((pg >> (i & 63)) & 1)) {
5279 d = *(float64 *)(va + H1_2(i));
5280 d = float64_muladd(e2, e1, d, 0, status);
5281 *(float64 *)(vd + H1_2(i)) = d;
5282 }
5283 if (likely((pg >> (j & 63)) & 1)) {
5284 d = *(float64 *)(va + H1_2(j));
5285 d = float64_muladd(e4, e3, d, 0, status);
5286 *(float64 *)(vd + H1_2(j)) = d;
5287 }
5288 } while (i & 63);
5289 } while (i != 0);
5290 }
5291
5292 /*
5293 * Load contiguous data, protected by a governing predicate.
5294 */
5295
5296 /*
5297 * Load one element into @vd + @reg_off from @host.
5298 * The controlling predicate is known to be true.
5299 */
5300 typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host);
5301
5302 /*
5303 * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
5304 * The controlling predicate is known to be true.
5305 */
5306 typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
5307 target_ulong vaddr, uintptr_t retaddr);
5308
5309 /*
5310 * Generate the above primitives.
5311 */
5312
5313 #define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
5314 static void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
5315 { \
5316 TYPEM val = HOST(host); \
5317 *(TYPEE *)(vd + H(reg_off)) = val; \
5318 }
5319
5320 #define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \
5321 static void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
5322 { HOST(host, (TYPEM)*(TYPEE *)(vd + H(reg_off))); }
5323
5324 #define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \
5325 static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
5326 target_ulong addr, uintptr_t ra) \
5327 { \
5328 *(TYPEE *)(vd + H(reg_off)) = \
5329 (TYPEM)TLB(env, useronly_clean_ptr(addr), ra); \
5330 }
5331
5332 #define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \
5333 static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
5334 target_ulong addr, uintptr_t ra) \
5335 { \
5336 TLB(env, useronly_clean_ptr(addr), \
5337 (TYPEM)*(TYPEE *)(vd + H(reg_off)), ra); \
5338 }
5339
5340 #define DO_LD_PRIM_1(NAME, H, TE, TM) \
5341 DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
5342 DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra)
5343
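/*
 * As an illustration, DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t) below
 * generates a pair of primitives roughly equivalent to:
 *
 *   static void sve_ld1bhu_host(void *vd, intptr_t reg_off, void *host)
 *   {
 *       *(uint16_t *)(vd + H1_2(reg_off)) = ldub_p(host);
 *   }
 *
 * plus an sve_ld1bhu_tlb() variant that uses cpu_ldub_data_ra() instead of
 * a direct host load, i.e. one byte is loaded and zero-extended into a
 * 16-bit vector element.
 */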
5344 DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t)
5345 DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
5346 DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t)
5347 DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
5348 DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t)
5349 DO_LD_PRIM_1(ld1bdu, , uint64_t, uint8_t)
5350 DO_LD_PRIM_1(ld1bds, , uint64_t, int8_t)
5351
5352 #define DO_ST_PRIM_1(NAME, H, TE, TM) \
5353 DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \
5354 DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra)
5355
5356 DO_ST_PRIM_1(bb, H1, uint8_t, uint8_t)
5357 DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t)
5358 DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t)
5359 DO_ST_PRIM_1(bd, , uint64_t, uint8_t)
5360
5361 #define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \
5362 DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \
5363 DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \
5364 DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \
5365 DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra)
5366
5367 #define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \
5368 DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \
5369 DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \
5370 DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \
5371 DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra)
5372
5373 DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw)
5374 DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw)
5375 DO_LD_PRIM_2(hss, H1_4, uint32_t, int16_t, lduw)
5376 DO_LD_PRIM_2(hdu, , uint64_t, uint16_t, lduw)
5377 DO_LD_PRIM_2(hds, , uint64_t, int16_t, lduw)
5378
5379 DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw)
5380 DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw)
5381 DO_ST_PRIM_2(hd, , uint64_t, uint16_t, stw)
5382
5383 DO_LD_PRIM_2(ss, H1_4, uint32_t, uint32_t, ldl)
5384 DO_LD_PRIM_2(sdu, , uint64_t, uint32_t, ldl)
5385 DO_LD_PRIM_2(sds, , uint64_t, int32_t, ldl)
5386
5387 DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl)
5388 DO_ST_PRIM_2(sd, , uint64_t, uint32_t, stl)
5389
5390 DO_LD_PRIM_2(dd, , uint64_t, uint64_t, ldq)
5391 DO_ST_PRIM_2(dd, , uint64_t, uint64_t, stq)
5392
5393 #undef DO_LD_TLB
5394 #undef DO_ST_TLB
5395 #undef DO_LD_HOST
5396 #undef DO_LD_PRIM_1
5397 #undef DO_ST_PRIM_1
5398 #undef DO_LD_PRIM_2
5399 #undef DO_ST_PRIM_2
5400
5401 /*
5402 * Skip through a sequence of inactive elements in the guarding predicate @vg,
5403 * beginning at @reg_off bounded by @reg_max. Return the offset of the active
5404 * element >= @reg_off, or @reg_max if there were no active elements at all.
5405 */
5406 static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off,
5407 intptr_t reg_max, int esz)
5408 {
5409 uint64_t pg_mask = pred_esz_masks[esz];
5410 uint64_t pg = (vg[reg_off >> 6] & pg_mask) >> (reg_off & 63);
5411
5412 /* In normal usage, the first element is active. */
5413 if (likely(pg & 1)) {
5414 return reg_off;
5415 }
5416
5417 if (pg == 0) {
5418 reg_off &= -64;
5419 do {
5420 reg_off += 64;
5421 if (unlikely(reg_off >= reg_max)) {
5422 /* The entire predicate was false. */
5423 return reg_max;
5424 }
5425 pg = vg[reg_off >> 6] & pg_mask;
5426 } while (pg == 0);
5427 }
5428 reg_off += ctz64(pg);
5429
5430 /* We should never see an out of range predicate bit set. */
5431 tcg_debug_assert(reg_off < reg_max);
5432 return reg_off;
5433 }
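/*
 * Worked example: with esz == 2 (32-bit elements) pred_esz_masks[esz] is
 * 0x1111111111111111.  If reg_off is 0 and the first predicate word is
 * 0x100, then pg == 0x100, the low bit is clear, and ctz64(pg) == 8, so
 * the function returns byte offset 8, i.e. the third element, which is
 * the first active one.
 */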
5434
5435 /*
5436 * Resolve the guest virtual address to info->host and info->flags.
5437 * If @nofault, return false if the page is invalid, otherwise
5438 * exit via page fault exception.
5439 */
5440
5441 typedef struct {
5442 void *host;
5443 int flags;
5444 MemTxAttrs attrs;
5445 } SVEHostPage;
5446
5447 static bool sve_probe_page(SVEHostPage *info, bool nofault,
5448 CPUARMState *env, target_ulong addr,
5449 int mem_off, MMUAccessType access_type,
5450 int mmu_idx, uintptr_t retaddr)
5451 {
5452 int flags;
5453
5454 addr += mem_off;
5455
5456 /*
5457 * User-only currently always issues with TBI. See the comment
5458 * above useronly_clean_ptr. Usually we clean this top byte away
5459 * during translation, but we can't do that for e.g. vector + imm
5460 * addressing modes.
5461 *
5462 * We currently always enable TBI for user-only, and do not provide
5463 * a way to turn it off. So clean the pointer unconditionally here,
5464 * rather than look it up here, or pass it down from above.
5465 */
5466 addr = useronly_clean_ptr(addr);
5467
5468 flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
5469 &info->host, retaddr);
5470 info->flags = flags;
5471
5472 if (flags & TLB_INVALID_MASK) {
5473 g_assert(nofault);
5474 return false;
5475 }
5476
5477 /* Ensure that info->host[] is relative to addr, not addr + mem_off. */
5478 info->host -= mem_off;
5479
5480 #ifdef CONFIG_USER_ONLY
5481 memset(&info->attrs, 0, sizeof(info->attrs));
5482 #else
5483 /*
5484 * Find the iotlbentry for addr and return the transaction attributes.
5485 * This *must* be present in the TLB because we just found the mapping.
5486 */
5487 {
5488 uintptr_t index = tlb_index(env, mmu_idx, addr);
5489
5490 # ifdef CONFIG_DEBUG_TCG
5491 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
5492 target_ulong comparator = (access_type == MMU_DATA_LOAD
5493 ? entry->addr_read
5494 : tlb_addr_write(entry));
5495 g_assert(tlb_hit(comparator, addr));
5496 # endif
5497
5498 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
5499 info->attrs = iotlbentry->attrs;
5500 }
5501 #endif
5502
5503 return true;
5504 }
5505
5506
5507 /*
5508 * Analyse contiguous data, protected by a governing predicate.
5509 */
5510
5511 typedef enum {
5512 FAULT_NO,
5513 FAULT_FIRST,
5514 FAULT_ALL,
5515 } SVEContFault;
5516
5517 typedef struct {
5518 /*
5519 * First and last element wholly contained within the two pages.
5520 * mem_off_first[0] and reg_off_first[0] are always set >= 0.
5521 * reg_off_last[0] may be < 0 if the first element crosses pages.
5522 * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
5523 * are set >= 0 only if there are complete elements on a second page.
5524 *
5525 * The reg_off_* offsets are relative to the internal vector register.
5526 * The mem_off_first offset is relative to the memory address; the
5527 * two offsets are different when a load operation extends, a store
5528 * operation truncates, or for multi-register operations.
5529 */
5530 int16_t mem_off_first[2];
5531 int16_t reg_off_first[2];
5532 int16_t reg_off_last[2];
5533
5534 /*
5535 * One element that is misaligned and spans both pages,
5536 * or -1 if there is no such active element.
5537 */
5538 int16_t mem_off_split;
5539 int16_t reg_off_split;
5540
5541 /*
5542 * The byte offset at which the entire operation crosses a page boundary.
5543 * Set >= 0 if and only if the entire operation spans two pages.
5544 */
5545 int16_t page_split;
5546
5547 /* TLB data for the two pages. */
5548 SVEHostPage page[2];
5549 } SVEContLdSt;
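/*
 * Worked example of the decomposition: consider an all-true LD1W with a
 * 64-byte vector (esz == 2, msize == 4) whose base address lies 10 bytes
 * below a page boundary.  Then page_split == 10, the element at offset 8
 * straddles the boundary (reg_off_split == mem_off_split == 8), the first
 * page holds the whole elements at offsets 0 and 4 (reg_off_first[0] == 0,
 * reg_off_last[0] == 4), and the second page holds the elements from
 * offset 12 up to 60 (reg_off_first[1] == 12, reg_off_last[1] == 60).
 */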
5550
5551 /*
5552 * Find first active element on each page, and a loose bound for the
5553 * final element on each page. Identify any single element that spans
5554 * the page boundary. Return true if there are any active elements.
5555 */
5556 static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr,
5557 uint64_t *vg, intptr_t reg_max,
5558 int esz, int msize)
5559 {
5560 const int esize = 1 << esz;
5561 const uint64_t pg_mask = pred_esz_masks[esz];
5562 intptr_t reg_off_first = -1, reg_off_last = -1, reg_off_split;
5563 intptr_t mem_off_last, mem_off_split;
5564 intptr_t page_split, elt_split;
5565 intptr_t i;
5566
5567 /* Set all of the element indices to -1, and the TLB data to 0. */
5568 memset(info, -1, offsetof(SVEContLdSt, page));
5569 memset(info->page, 0, sizeof(info->page));
5570
5571 /* Gross scan over the entire predicate to find bounds. */
5572 i = 0;
5573 do {
5574 uint64_t pg = vg[i] & pg_mask;
5575 if (pg) {
5576 reg_off_last = i * 64 + 63 - clz64(pg);
5577 if (reg_off_first < 0) {
5578 reg_off_first = i * 64 + ctz64(pg);
5579 }
5580 }
5581 } while (++i * 64 < reg_max);
5582
5583 if (unlikely(reg_off_first < 0)) {
5584 /* No active elements, no pages touched. */
5585 return false;
5586 }
5587 tcg_debug_assert(reg_off_last >= 0 && reg_off_last < reg_max);
5588
5589 info->reg_off_first[0] = reg_off_first;
5590 info->mem_off_first[0] = (reg_off_first >> esz) * msize;
5591 mem_off_last = (reg_off_last >> esz) * msize;
5592
5593 page_split = -(addr | TARGET_PAGE_MASK);
5594 if (likely(mem_off_last + msize <= page_split)) {
5595 /* The entire operation fits within a single page. */
5596 info->reg_off_last[0] = reg_off_last;
5597 return true;
5598 }
5599
5600 info->page_split = page_split;
5601 elt_split = page_split / msize;
5602 reg_off_split = elt_split << esz;
5603 mem_off_split = elt_split * msize;
5604
5605 /*
5606 * This is the last full element on the first page, but it is not
5607 * necessarily active. If there is no full element, i.e. the first
5608 * active element is the one that's split, this value remains -1.
5609  * It is useful as an iteration bound.
5610 */
5611 if (elt_split != 0) {
5612 info->reg_off_last[0] = reg_off_split - esize;
5613 }
5614
5615 /* Determine if an unaligned element spans the pages. */
5616 if (page_split % msize != 0) {
5617 /* It is helpful to know if the split element is active. */
5618 if ((vg[reg_off_split >> 6] >> (reg_off_split & 63)) & 1) {
5619 info->reg_off_split = reg_off_split;
5620 info->mem_off_split = mem_off_split;
5621
5622 if (reg_off_split == reg_off_last) {
5623 /* The page crossing element is last. */
5624 return true;
5625 }
5626 }
5627 reg_off_split += esize;
5628 mem_off_split += msize;
5629 }
5630
5631 /*
5632 * We do want the first active element on the second page, because
5633 * this may affect the address reported in an exception.
5634 */
5635 reg_off_split = find_next_active(vg, reg_off_split, reg_max, esz);
5636 tcg_debug_assert(reg_off_split <= reg_off_last);
5637 info->reg_off_first[1] = reg_off_split;
5638 info->mem_off_first[1] = (reg_off_split >> esz) * msize;
5639 info->reg_off_last[1] = reg_off_last;
5640 return true;
5641 }
5642
5643 /*
5644 * Resolve the guest virtual addresses to info->page[].
5645 * Control the generation of page faults with @fault. Return false if
5646 * there is no work to do, which can only happen with @fault == FAULT_NO.
5647 */
5648 static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
5649 CPUARMState *env, target_ulong addr,
5650 MMUAccessType access_type, uintptr_t retaddr)
5651 {
5652 int mmu_idx = cpu_mmu_index(env, false);
5653 int mem_off = info->mem_off_first[0];
5654 bool nofault = fault == FAULT_NO;
5655 bool have_work = true;
5656
5657 if (!sve_probe_page(&info->page[0], nofault, env, addr, mem_off,
5658 access_type, mmu_idx, retaddr)) {
5659 /* No work to be done. */
5660 return false;
5661 }
5662
5663 if (likely(info->page_split < 0)) {
5664 /* The entire operation was on the one page. */
5665 return true;
5666 }
5667
5668 /*
5669 * If the second page is invalid, then we want the fault address to be
5670 * the first byte on that page which is accessed.
5671 */
5672 if (info->mem_off_split >= 0) {
5673 /*
5674 * There is an element split across the pages. The fault address
5675 * should be the first byte of the second page.
5676 */
5677 mem_off = info->page_split;
5678 /*
5679 * If the split element is also the first active element
5680 * of the vector, then: For first-fault we should continue
5681 * to generate faults for the second page. For no-fault,
5682 * we have work only if the second page is valid.
5683 */
5684 if (info->mem_off_first[0] < info->mem_off_split) {
5685 nofault = FAULT_FIRST;
5686 have_work = false;
5687 }
5688 } else {
5689 /*
5690 * There is no element split across the pages. The fault address
5691 * should be the first active element on the second page.
5692 */
5693 mem_off = info->mem_off_first[1];
5694 /*
5695 * There must have been one active element on the first page,
5696 * so we're out of first-fault territory.
5697 */
5698 nofault = fault != FAULT_ALL;
5699 }
5700
5701 have_work |= sve_probe_page(&info->page[1], nofault, env, addr, mem_off,
5702 access_type, mmu_idx, retaddr);
5703 return have_work;
5704 }
5705
5706 static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
5707 uint64_t *vg, target_ulong addr,
5708 int esize, int msize, int wp_access,
5709 uintptr_t retaddr)
5710 {
5711 #ifndef CONFIG_USER_ONLY
5712 intptr_t mem_off, reg_off, reg_last;
5713 int flags0 = info->page[0].flags;
5714 int flags1 = info->page[1].flags;
5715
5716 if (likely(!((flags0 | flags1) & TLB_WATCHPOINT))) {
5717 return;
5718 }
5719
5720 /* Indicate that watchpoints are handled. */
5721 info->page[0].flags = flags0 & ~TLB_WATCHPOINT;
5722 info->page[1].flags = flags1 & ~TLB_WATCHPOINT;
5723
5724 if (flags0 & TLB_WATCHPOINT) {
5725 mem_off = info->mem_off_first[0];
5726 reg_off = info->reg_off_first[0];
5727 reg_last = info->reg_off_last[0];
5728
5729 while (reg_off <= reg_last) {
5730 uint64_t pg = vg[reg_off >> 6];
5731 do {
5732 if ((pg >> (reg_off & 63)) & 1) {
5733 cpu_check_watchpoint(env_cpu(env), addr + mem_off,
5734 msize, info->page[0].attrs,
5735 wp_access, retaddr);
5736 }
5737 reg_off += esize;
5738 mem_off += msize;
5739 } while (reg_off <= reg_last && (reg_off & 63));
5740 }
5741 }
5742
5743 mem_off = info->mem_off_split;
5744 if (mem_off >= 0) {
5745 cpu_check_watchpoint(env_cpu(env), addr + mem_off, msize,
5746 info->page[0].attrs, wp_access, retaddr);
5747 }
5748
5749 mem_off = info->mem_off_first[1];
5750 if ((flags1 & TLB_WATCHPOINT) && mem_off >= 0) {
5751 reg_off = info->reg_off_first[1];
5752 reg_last = info->reg_off_last[1];
5753
5754 do {
5755 uint64_t pg = vg[reg_off >> 6];
5756 do {
5757 if ((pg >> (reg_off & 63)) & 1) {
5758 cpu_check_watchpoint(env_cpu(env), addr + mem_off,
5759 msize, info->page[1].attrs,
5760 wp_access, retaddr);
5761 }
5762 reg_off += esize;
5763 mem_off += msize;
5764 } while (reg_off & 63);
5765 } while (reg_off <= reg_last);
5766 }
5767 #endif
5768 }
5769
5770 static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
5771 uint64_t *vg, target_ulong addr, int esize,
5772 int msize, uint32_t mtedesc, uintptr_t ra)
5773 {
5774 intptr_t mem_off, reg_off, reg_last;
5775
5776 /* Process the page only if MemAttr == Tagged. */
5777 if (arm_tlb_mte_tagged(&info->page[0].attrs)) {
5778 mem_off = info->mem_off_first[0];
5779 reg_off = info->reg_off_first[0];
5780 reg_last = info->reg_off_split;
5781 if (reg_last < 0) {
5782 reg_last = info->reg_off_last[0];
5783 }
5784
5785 do {
5786 uint64_t pg = vg[reg_off >> 6];
5787 do {
5788 if ((pg >> (reg_off & 63)) & 1) {
5789 mte_check(env, mtedesc, addr, ra);
5790 }
5791 reg_off += esize;
5792 mem_off += msize;
5793 } while (reg_off <= reg_last && (reg_off & 63));
5794 } while (reg_off <= reg_last);
5795 }
5796
5797 mem_off = info->mem_off_first[1];
5798 if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) {
5799 reg_off = info->reg_off_first[1];
5800 reg_last = info->reg_off_last[1];
5801
5802 do {
5803 uint64_t pg = vg[reg_off >> 6];
5804 do {
5805 if ((pg >> (reg_off & 63)) & 1) {
5806 mte_check(env, mtedesc, addr, ra);
5807 }
5808 reg_off += esize;
5809 mem_off += msize;
5810 } while (reg_off & 63);
5811 } while (reg_off <= reg_last);
5812 }
5813 }
5814
5815 /*
5816  * Common helper for all contiguous 1,2,3,4-register predicated loads.
5817 */
5818 static inline QEMU_ALWAYS_INLINE
5819 void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
5820 uint32_t desc, const uintptr_t retaddr,
5821 const int esz, const int msz, const int N, uint32_t mtedesc,
5822 sve_ldst1_host_fn *host_fn,
5823 sve_ldst1_tlb_fn *tlb_fn)
5824 {
5825 const unsigned rd = simd_data(desc);
5826 const intptr_t reg_max = simd_oprsz(desc);
5827 intptr_t reg_off, reg_last, mem_off;
5828 SVEContLdSt info;
5829 void *host;
5830 int flags, i;
5831
5832 /* Find the active elements. */
5833 if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, N << msz)) {
5834 /* The entire predicate was false; no load occurs. */
5835 for (i = 0; i < N; ++i) {
5836 memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
5837 }
5838 return;
5839 }
5840
5841 /* Probe the page(s). Exit with exception for any invalid page. */
5842 sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr);
5843
5844 /* Handle watchpoints for all active elements. */
5845 sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, N << msz,
5846 BP_MEM_READ, retaddr);
5847
5848 /*
5849 * Handle mte checks for all active elements.
5850 * Since TBI must be set for MTE, !mtedesc => !mte_active.
5851 */
5852 if (mtedesc) {
5853 sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
5854 mtedesc, retaddr);
5855 }
5856
5857 flags = info.page[0].flags | info.page[1].flags;
5858 if (unlikely(flags != 0)) {
5859 #ifdef CONFIG_USER_ONLY
5860 g_assert_not_reached();
5861 #else
5862 /*
5863 * At least one page includes MMIO.
5864 * Any bus operation can fail with cpu_transaction_failed,
5865 * which for ARM will raise SyncExternal. Perform the load
5866 * into scratch memory to preserve register state until the end.
5867 */
5868 ARMVectorReg scratch[4] = { };
5869
5870 mem_off = info.mem_off_first[0];
5871 reg_off = info.reg_off_first[0];
5872 reg_last = info.reg_off_last[1];
5873 if (reg_last < 0) {
5874 reg_last = info.reg_off_split;
5875 if (reg_last < 0) {
5876 reg_last = info.reg_off_last[0];
5877 }
5878 }
5879
5880 do {
5881 uint64_t pg = vg[reg_off >> 6];
5882 do {
5883 if ((pg >> (reg_off & 63)) & 1) {
5884 for (i = 0; i < N; ++i) {
5885 tlb_fn(env, &scratch[i], reg_off,
5886 addr + mem_off + (i << msz), retaddr);
5887 }
5888 }
5889 reg_off += 1 << esz;
5890 mem_off += N << msz;
5891 } while (reg_off & 63);
5892 } while (reg_off <= reg_last);
5893
5894 for (i = 0; i < N; ++i) {
5895 memcpy(&env->vfp.zregs[(rd + i) & 31], &scratch[i], reg_max);
5896 }
5897 return;
5898 #endif
5899 }
5900
5901 /* The entire operation is in RAM, on valid pages. */
5902
5903 for (i = 0; i < N; ++i) {
5904 memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
5905 }
5906
5907 mem_off = info.mem_off_first[0];
5908 reg_off = info.reg_off_first[0];
5909 reg_last = info.reg_off_last[0];
5910 host = info.page[0].host;
5911
5912 while (reg_off <= reg_last) {
5913 uint64_t pg = vg[reg_off >> 6];
5914 do {
5915 if ((pg >> (reg_off & 63)) & 1) {
5916 for (i = 0; i < N; ++i) {
5917 host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
5918 host + mem_off + (i << msz));
5919 }
5920 }
5921 reg_off += 1 << esz;
5922 mem_off += N << msz;
5923 } while (reg_off <= reg_last && (reg_off & 63));
5924 }
5925
5926 /*
5927 * Use the slow path to manage the cross-page misalignment.
5928 * But we know this is RAM and cannot trap.
5929 */
5930 mem_off = info.mem_off_split;
5931 if (unlikely(mem_off >= 0)) {
5932 reg_off = info.reg_off_split;
5933 for (i = 0; i < N; ++i) {
5934 tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
5935 addr + mem_off + (i << msz), retaddr);
5936 }
5937 }
5938
5939 mem_off = info.mem_off_first[1];
5940 if (unlikely(mem_off >= 0)) {
5941 reg_off = info.reg_off_first[1];
5942 reg_last = info.reg_off_last[1];
5943 host = info.page[1].host;
5944
5945 do {
5946 uint64_t pg = vg[reg_off >> 6];
5947 do {
5948 if ((pg >> (reg_off & 63)) & 1) {
5949 for (i = 0; i < N; ++i) {
5950 host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
5951 host + mem_off + (i << msz));
5952 }
5953 }
5954 reg_off += 1 << esz;
5955 mem_off += N << msz;
5956 } while (reg_off & 63);
5957 } while (reg_off <= reg_last);
5958 }
5959 }
5960
5961 static inline QEMU_ALWAYS_INLINE
5962 void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
5963 uint32_t desc, const uintptr_t ra,
5964 const int esz, const int msz, const int N,
5965 sve_ldst1_host_fn *host_fn,
5966 sve_ldst1_tlb_fn *tlb_fn)
5967 {
5968 uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
5969 int bit55 = extract64(addr, 55, 1);
5970
5971 /* Remove mtedesc from the normal sve descriptor. */
5972 desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
5973
5974 /* Perform gross MTE suppression early. */
5975 if (!tbi_check(desc, bit55) ||
5976 tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
5977 mtedesc = 0;
5978 }
5979
5980 sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
5981 }
5982
5983 #define DO_LD1_1(NAME, ESZ) \
5984 void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
5985 target_ulong addr, uint32_t desc) \
5986 { \
5987 sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \
5988 sve_##NAME##_host, sve_##NAME##_tlb); \
5989 } \
5990 void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \
5991 target_ulong addr, uint32_t desc) \
5992 { \
5993 sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, \
5994 sve_##NAME##_host, sve_##NAME##_tlb); \
5995 }
5996
5997 #define DO_LD1_2(NAME, ESZ, MSZ) \
5998 void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
5999 target_ulong addr, uint32_t desc) \
6000 { \
6001 sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
6002 sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
6003 } \
6004 void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
6005 target_ulong addr, uint32_t desc) \
6006 { \
6007 sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
6008 sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
6009 } \
6010 void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
6011 target_ulong addr, uint32_t desc) \
6012 { \
6013 sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
6014 sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
6015 } \
6016 void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
6017 target_ulong addr, uint32_t desc) \
6018 { \
6019 sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
6020 sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
6021 }
6022
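/*
 * In the expansions below the suffix encodes the memory element size
 * (b, h, s, d), the vector element size it is widened to, and whether the
 * widening is unsigned or signed; e.g. ld1hsu loads 16-bit values and
 * zero-extends them into 32-bit elements, while ld1bds loads bytes and
 * sign-extends them into 64-bit elements.  The _le/_be variants give the
 * memory endianness.
 */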
6023 DO_LD1_1(ld1bb, MO_8)
6024 DO_LD1_1(ld1bhu, MO_16)
6025 DO_LD1_1(ld1bhs, MO_16)
6026 DO_LD1_1(ld1bsu, MO_32)
6027 DO_LD1_1(ld1bss, MO_32)
6028 DO_LD1_1(ld1bdu, MO_64)
6029 DO_LD1_1(ld1bds, MO_64)
6030
6031 DO_LD1_2(ld1hh, MO_16, MO_16)
6032 DO_LD1_2(ld1hsu, MO_32, MO_16)
6033 DO_LD1_2(ld1hss, MO_32, MO_16)
6034 DO_LD1_2(ld1hdu, MO_64, MO_16)
6035 DO_LD1_2(ld1hds, MO_64, MO_16)
6036
6037 DO_LD1_2(ld1ss, MO_32, MO_32)
6038 DO_LD1_2(ld1sdu, MO_64, MO_32)
6039 DO_LD1_2(ld1sds, MO_64, MO_32)
6040
6041 DO_LD1_2(ld1dd, MO_64, MO_64)
6042
6043 #undef DO_LD1_1
6044 #undef DO_LD1_2
6045
6046 #define DO_LDN_1(N) \
6047 void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
6048 target_ulong addr, uint32_t desc) \
6049 { \
6050 sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \
6051 sve_ld1bb_host, sve_ld1bb_tlb); \
6052 } \
6053 void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \
6054 target_ulong addr, uint32_t desc) \
6055 { \
6056 sve_ldN_r_mte(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, \
6057 sve_ld1bb_host, sve_ld1bb_tlb); \
6058 }
6059
6060 #define DO_LDN_2(N, SUFF, ESZ) \
6061 void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
6062 target_ulong addr, uint32_t desc) \
6063 { \
6064 sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
6065 sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
6066 } \
6067 void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
6068 target_ulong addr, uint32_t desc) \
6069 { \
6070 sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
6071 sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
6072 } \
6073 void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \
6074 target_ulong addr, uint32_t desc) \
6075 { \
6076 sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
6077 sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
6078 } \
6079 void HELPER(sve_ld##N##SUFF##_be_r_mte)(CPUARMState *env, void *vg, \
6080 target_ulong addr, uint32_t desc) \
6081 { \
6082 sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
6083 sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
6084 }
6085
6086 DO_LDN_1(2)
6087 DO_LDN_1(3)
6088 DO_LDN_1(4)
6089
6090 DO_LDN_2(2, hh, MO_16)
6091 DO_LDN_2(3, hh, MO_16)
6092 DO_LDN_2(4, hh, MO_16)
6093
6094 DO_LDN_2(2, ss, MO_32)
6095 DO_LDN_2(3, ss, MO_32)
6096 DO_LDN_2(4, ss, MO_32)
6097
6098 DO_LDN_2(2, dd, MO_64)
6099 DO_LDN_2(3, dd, MO_64)
6100 DO_LDN_2(4, dd, MO_64)
6101
6102 #undef DO_LDN_1
6103 #undef DO_LDN_2
6104
6105 /*
6106 * Load contiguous data, first-fault and no-fault.
6107 *
6108 * For user-only, one could argue that we should hold the mmap_lock during
6109 * the operation so that there is no race between page_check_range and the
6110 * load operation. However, unmapping pages out from under a running thread
6111 * is extraordinarily unlikely. This theoretical race condition also affects
6112 * linux-user/ in its get_user/put_user macros.
6113 *
6114 * TODO: Construct some helpers, written in assembly, that interact with
6115 * handle_cpu_signal to produce memory ops which can properly report errors
6116 * without racing.
6117 */
6118
6119 /* Fault on byte I. All bits in FFR from I are cleared. The vector
6120 * result from I is CONSTRAINED UNPREDICTABLE; we choose the MERGE
6121 * option, which leaves subsequent data unchanged.
6122 */
6123 static void record_fault(CPUARMState *env, uintptr_t i, uintptr_t oprsz)
6124 {
6125 uint64_t *ffr = env->vfp.pregs[FFR_PRED_NUM].p;
6126
6127 if (i & 63) {
6128 ffr[i / 64] &= MAKE_64BIT_MASK(0, i & 63);
6129 i = ROUND_UP(i, 64);
6130 }
6131 for (; i < oprsz; i += 64) {
6132 ffr[i / 64] = 0;
6133 }
6134 }
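/*
 * For example, a fault recorded at reg_off == 20 with a 256-byte vector
 * leaves bits 0..19 of the first FFR word intact (the elements already
 * loaded) and clears everything from bit 20 upward, including the three
 * remaining FFR words.
 */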
6135
6136 /*
6137 * Common helper for all contiguous no-fault and first-fault loads.
6138 */
6139 static inline QEMU_ALWAYS_INLINE
6140 void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
6141 uint32_t desc, const uintptr_t retaddr, uint32_t mtedesc,
6142 const int esz, const int msz, const SVEContFault fault,
6143 sve_ldst1_host_fn *host_fn,
6144 sve_ldst1_tlb_fn *tlb_fn)
6145 {
6146 const unsigned rd = simd_data(desc);
6147 void *vd = &env->vfp.zregs[rd];
6148 const intptr_t reg_max = simd_oprsz(desc);
6149 intptr_t reg_off, mem_off, reg_last;
6150 SVEContLdSt info;
6151 int flags;
6152 void *host;
6153
6154 /* Find the active elements. */
6155 if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, 1 << msz)) {
6156 /* The entire predicate was false; no load occurs. */
6157 memset(vd, 0, reg_max);
6158 return;
6159 }
6160 reg_off = info.reg_off_first[0];
6161
6162 /* Probe the page(s). */
6163 if (!sve_cont_ldst_pages(&info, fault, env, addr, MMU_DATA_LOAD, retaddr)) {
6164 /* Fault on first element. */
6165 tcg_debug_assert(fault == FAULT_NO);
6166 memset(vd, 0, reg_max);
6167 goto do_fault;
6168 }
6169
6170 mem_off = info.mem_off_first[0];
6171 flags = info.page[0].flags;
6172
6173 /*
6174 * Disable MTE checking if the Tagged bit is not set. Since TBI must
6175 * be set within MTEDESC for MTE, !mtedesc => !mte_active.
6176 */
6177     if (!arm_tlb_mte_tagged(&info.page[0].attrs)) {
6178 mtedesc = 0;
6179 }
6180
6181 if (fault == FAULT_FIRST) {
6182 /* Trapping mte check for the first-fault element. */
6183 if (mtedesc) {
6184 mte_check(env, mtedesc, addr + mem_off, retaddr);
6185 }
6186
6187 /*
6188 * Special handling of the first active element,
6189 * if it crosses a page boundary or is MMIO.
6190 */
6191 bool is_split = mem_off == info.mem_off_split;
6192 if (unlikely(flags != 0) || unlikely(is_split)) {
6193 /*
6194 * Use the slow path for cross-page handling.
6195 * Might trap for MMIO or watchpoints.
6196 */
6197 tlb_fn(env, vd, reg_off, addr + mem_off, retaddr);
6198
6199 /* After any fault, zero the other elements. */
6200 swap_memzero(vd, reg_off);
6201 reg_off += 1 << esz;
6202 mem_off += 1 << msz;
6203 swap_memzero(vd + reg_off, reg_max - reg_off);
6204
6205 if (is_split) {
6206 goto second_page;
6207 }
6208 } else {
6209 memset(vd, 0, reg_max);
6210 }
6211 } else {
6212 memset(vd, 0, reg_max);
6213 if (unlikely(mem_off == info.mem_off_split)) {
6214 /* The first active element crosses a page boundary. */
6215 flags |= info.page[1].flags;
6216 if (unlikely(flags & TLB_MMIO)) {
6217 /* Some page is MMIO, see below. */
6218 goto do_fault;
6219 }
6220 if (unlikely(flags & TLB_WATCHPOINT) &&
6221 (cpu_watchpoint_address_matches
6222 (env_cpu(env), addr + mem_off, 1 << msz)
6223 & BP_MEM_READ)) {
6224 /* Watchpoint hit, see below. */
6225 goto do_fault;
6226 }
6227 if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
6228 goto do_fault;
6229 }
6230 /*
6231 * Use the slow path for cross-page handling.
6232 * This is RAM, without a watchpoint, and will not trap.
6233 */
6234 tlb_fn(env, vd, reg_off, addr + mem_off, retaddr);
6235 goto second_page;
6236 }
6237 }
6238
6239 /*
6240 * From this point on, all memory operations are MemSingleNF.
6241 *
6242 * Per the MemSingleNF pseudocode, a no-fault load from Device memory
6243 * must not actually hit the bus -- it returns (UNKNOWN, FAULT) instead.
6244 *
6245  * Unfortunately we do not have access to the memory attributes from the
6246 * PTE to tell Device memory from Normal memory. So we make a mostly
6247 * correct check, and indicate (UNKNOWN, FAULT) for any MMIO.
6248 * This gives the right answer for the common cases of "Normal memory,
6249 * backed by host RAM" and "Device memory, backed by MMIO".
6250 * The architecture allows us to suppress an NF load and return
6251 * (UNKNOWN, FAULT) for any reason, so our behaviour for the corner
6252 * case of "Normal memory, backed by MMIO" is permitted. The case we
6253 * get wrong is "Device memory, backed by host RAM", for which we
6254  * should return (UNKNOWN, FAULT) but do not.
6255 *
6256 * Similarly, CPU_BP breakpoints would raise exceptions, and so
6257 * return (UNKNOWN, FAULT). For simplicity, we consider gdb and
6258 * architectural breakpoints the same.
6259 */
6260 if (unlikely(flags & TLB_MMIO)) {
6261 goto do_fault;
6262 }
6263
6264 reg_last = info.reg_off_last[0];
6265 host = info.page[0].host;
6266
6267 do {
6268 uint64_t pg = *(uint64_t *)(vg + (reg_off >> 3));
6269 do {
6270 if ((pg >> (reg_off & 63)) & 1) {
6271 if (unlikely(flags & TLB_WATCHPOINT) &&
6272 (cpu_watchpoint_address_matches
6273 (env_cpu(env), addr + mem_off, 1 << msz)
6274 & BP_MEM_READ)) {
6275 goto do_fault;
6276 }
6277 if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
6278 goto do_fault;
6279 }
6280 host_fn(vd, reg_off, host + mem_off);
6281 }
6282 reg_off += 1 << esz;
6283 mem_off += 1 << msz;
6284 } while (reg_off <= reg_last && (reg_off & 63));
6285 } while (reg_off <= reg_last);
6286
6287 /*
6288 * MemSingleNF is allowed to fail for any reason. We have special
6289 * code above to handle the first element crossing a page boundary.
6290 * As an implementation choice, decline to handle a cross-page element
6291 * in any other position.
6292 */
6293 reg_off = info.reg_off_split;
6294 if (reg_off >= 0) {
6295 goto do_fault;
6296 }
6297
6298 second_page:
6299 reg_off = info.reg_off_first[1];
6300 if (likely(reg_off < 0)) {
6301 /* No active elements on the second page. All done. */
6302 return;
6303 }
6304
6305 /*
6306 * MemSingleNF is allowed to fail for any reason. As an implementation
6307 * choice, decline to handle elements on the second page. This should
6308 * be low frequency as the guest walks through memory -- the next
6309 * iteration of the guest's loop should be aligned on the page boundary,
6310 * and then all following iterations will stay aligned.
6311 */
6312
6313 do_fault:
6314 record_fault(env, reg_off, reg_max);
6315 }
6316
6317 static inline QEMU_ALWAYS_INLINE
6318 void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr,
6319 uint32_t desc, const uintptr_t retaddr,
6320 const int esz, const int msz, const SVEContFault fault,
6321 sve_ldst1_host_fn *host_fn,
6322 sve_ldst1_tlb_fn *tlb_fn)
6323 {
6324 uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6325 int bit55 = extract64(addr, 55, 1);
6326
6327 /* Remove mtedesc from the normal sve descriptor. */
6328 desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6329
6330 /* Perform gross MTE suppression early. */
6331 if (!tbi_check(desc, bit55) ||
6332 tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
6333 mtedesc = 0;
6334 }
6335
6336 sve_ldnfff1_r(env, vg, addr, desc, retaddr, mtedesc,
6337 esz, msz, fault, host_fn, tlb_fn);
6338 }
6339
6340 #define DO_LDFF1_LDNF1_1(PART, ESZ) \
6341 void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \
6342 target_ulong addr, uint32_t desc) \
6343 { \
6344 sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_FIRST, \
6345 sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
6346 } \
6347 void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \
6348 target_ulong addr, uint32_t desc) \
6349 { \
6350 sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_NO, \
6351 sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
6352 } \
6353 void HELPER(sve_ldff1##PART##_r_mte)(CPUARMState *env, void *vg, \
6354 target_ulong addr, uint32_t desc) \
6355 { \
6356 sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_FIRST, \
6357 sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
6358 } \
6359 void HELPER(sve_ldnf1##PART##_r_mte)(CPUARMState *env, void *vg, \
6360 target_ulong addr, uint32_t desc) \
6361 { \
6362 sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_NO, \
6363 sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
6364 }
6365
6366 #define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \
6367 void HELPER(sve_ldff1##PART##_le_r)(CPUARMState *env, void *vg, \
6368 target_ulong addr, uint32_t desc) \
6369 { \
6370 sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \
6371 sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
6372 } \
6373 void HELPER(sve_ldnf1##PART##_le_r)(CPUARMState *env, void *vg, \
6374 target_ulong addr, uint32_t desc) \
6375 { \
6376 sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \
6377 sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
6378 } \
6379 void HELPER(sve_ldff1##PART##_be_r)(CPUARMState *env, void *vg, \
6380 target_ulong addr, uint32_t desc) \
6381 { \
6382 sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \
6383 sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
6384 } \
6385 void HELPER(sve_ldnf1##PART##_be_r)(CPUARMState *env, void *vg, \
6386 target_ulong addr, uint32_t desc) \
6387 { \
6388 sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \
6389 sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
6390 } \
6391 void HELPER(sve_ldff1##PART##_le_r_mte)(CPUARMState *env, void *vg, \
6392 target_ulong addr, uint32_t desc) \
6393 { \
6394 sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \
6395 sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
6396 } \
6397 void HELPER(sve_ldnf1##PART##_le_r_mte)(CPUARMState *env, void *vg, \
6398 target_ulong addr, uint32_t desc) \
6399 { \
6400 sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \
6401 sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
6402 } \
6403 void HELPER(sve_ldff1##PART##_be_r_mte)(CPUARMState *env, void *vg, \
6404 target_ulong addr, uint32_t desc) \
6405 { \
6406 sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \
6407 sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
6408 } \
6409 void HELPER(sve_ldnf1##PART##_be_r_mte)(CPUARMState *env, void *vg, \
6410 target_ulong addr, uint32_t desc) \
6411 { \
6412 sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \
6413 sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
6414 }
6415
6416 DO_LDFF1_LDNF1_1(bb, MO_8)
6417 DO_LDFF1_LDNF1_1(bhu, MO_16)
6418 DO_LDFF1_LDNF1_1(bhs, MO_16)
6419 DO_LDFF1_LDNF1_1(bsu, MO_32)
6420 DO_LDFF1_LDNF1_1(bss, MO_32)
6421 DO_LDFF1_LDNF1_1(bdu, MO_64)
6422 DO_LDFF1_LDNF1_1(bds, MO_64)
6423
6424 DO_LDFF1_LDNF1_2(hh, MO_16, MO_16)
6425 DO_LDFF1_LDNF1_2(hsu, MO_32, MO_16)
6426 DO_LDFF1_LDNF1_2(hss, MO_32, MO_16)
6427 DO_LDFF1_LDNF1_2(hdu, MO_64, MO_16)
6428 DO_LDFF1_LDNF1_2(hds, MO_64, MO_16)
6429
6430 DO_LDFF1_LDNF1_2(ss, MO_32, MO_32)
6431 DO_LDFF1_LDNF1_2(sdu, MO_64, MO_32)
6432 DO_LDFF1_LDNF1_2(sds, MO_64, MO_32)
6433
6434 DO_LDFF1_LDNF1_2(dd, MO_64, MO_64)
6435
6436 #undef DO_LDFF1_LDNF1_1
6437 #undef DO_LDFF1_LDNF1_2
6438
6439 /*
6440 * Common helper for all contiguous 1,2,3,4-register predicated stores.
6441 */
6442
6443 static inline QEMU_ALWAYS_INLINE
6444 void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
6445 uint32_t desc, const uintptr_t retaddr,
6446 const int esz, const int msz, const int N, uint32_t mtedesc,
6447 sve_ldst1_host_fn *host_fn,
6448 sve_ldst1_tlb_fn *tlb_fn)
6449 {
6450 const unsigned rd = simd_data(desc);
6451 const intptr_t reg_max = simd_oprsz(desc);
6452 intptr_t reg_off, reg_last, mem_off;
6453 SVEContLdSt info;
6454 void *host;
6455 int i, flags;
6456
6457 /* Find the active elements. */
6458 if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, N << msz)) {
6459 /* The entire predicate was false; no store occurs. */
6460 return;
6461 }
6462
6463 /* Probe the page(s). Exit with exception for any invalid page. */
6464 sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, retaddr);
6465
6466 /* Handle watchpoints for all active elements. */
6467 sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, N << msz,
6468 BP_MEM_WRITE, retaddr);
6469
6470 /*
6471 * Handle mte checks for all active elements.
6472 * Since TBI must be set for MTE, !mtedesc => !mte_active.
6473 */
6474 if (mtedesc) {
6475 sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
6476 mtedesc, retaddr);
6477 }
6478
6479 flags = info.page[0].flags | info.page[1].flags;
6480 if (unlikely(flags != 0)) {
6481 #ifdef CONFIG_USER_ONLY
6482 g_assert_not_reached();
6483 #else
6484 /*
6485 * At least one page includes MMIO.
6486 * Any bus operation can fail with cpu_transaction_failed,
6487 * which for ARM will raise SyncExternal. We cannot avoid
6488 * this fault and will leave with the store incomplete.
6489 */
6490 mem_off = info.mem_off_first[0];
6491 reg_off = info.reg_off_first[0];
6492 reg_last = info.reg_off_last[1];
6493 if (reg_last < 0) {
6494 reg_last = info.reg_off_split;
6495 if (reg_last < 0) {
6496 reg_last = info.reg_off_last[0];
6497 }
6498 }
6499
6500 do {
6501 uint64_t pg = vg[reg_off >> 6];
6502 do {
6503 if ((pg >> (reg_off & 63)) & 1) {
6504 for (i = 0; i < N; ++i) {
6505 tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
6506 addr + mem_off + (i << msz), retaddr);
6507 }
6508 }
6509 reg_off += 1 << esz;
6510 mem_off += N << msz;
6511 } while (reg_off & 63);
6512 } while (reg_off <= reg_last);
6513 return;
6514 #endif
6515 }
6516
6517 mem_off = info.mem_off_first[0];
6518 reg_off = info.reg_off_first[0];
6519 reg_last = info.reg_off_last[0];
6520 host = info.page[0].host;
6521
6522 while (reg_off <= reg_last) {
6523 uint64_t pg = vg[reg_off >> 6];
6524 do {
6525 if ((pg >> (reg_off & 63)) & 1) {
6526 for (i = 0; i < N; ++i) {
6527 host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
6528 host + mem_off + (i << msz));
6529 }
6530 }
6531 reg_off += 1 << esz;
6532 mem_off += N << msz;
6533 } while (reg_off <= reg_last && (reg_off & 63));
6534 }
6535
6536 /*
6537 * Use the slow path to manage the cross-page misalignment.
6538 * But we know this is RAM and cannot trap.
6539 */
6540 mem_off = info.mem_off_split;
6541 if (unlikely(mem_off >= 0)) {
6542 reg_off = info.reg_off_split;
6543 for (i = 0; i < N; ++i) {
6544 tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
6545 addr + mem_off + (i << msz), retaddr);
6546 }
6547 }
6548
6549 mem_off = info.mem_off_first[1];
6550 if (unlikely(mem_off >= 0)) {
6551 reg_off = info.reg_off_first[1];
6552 reg_last = info.reg_off_last[1];
6553 host = info.page[1].host;
6554
6555 do {
6556 uint64_t pg = vg[reg_off >> 6];
6557 do {
6558 if ((pg >> (reg_off & 63)) & 1) {
6559 for (i = 0; i < N; ++i) {
6560 host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
6561 host + mem_off + (i << msz));
6562 }
6563 }
6564 reg_off += 1 << esz;
6565 mem_off += N << msz;
6566 } while (reg_off & 63);
6567 } while (reg_off <= reg_last);
6568 }
6569 }
6570
6571 static inline QEMU_ALWAYS_INLINE
6572 void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
6573 uint32_t desc, const uintptr_t ra,
6574 const int esz, const int msz, const int N,
6575 sve_ldst1_host_fn *host_fn,
6576 sve_ldst1_tlb_fn *tlb_fn)
6577 {
6578 uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6579 int bit55 = extract64(addr, 55, 1);
6580
6581 /* Remove mtedesc from the normal sve descriptor. */
6582 desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6583
6584 /* Perform gross MTE suppression early. */
6585 if (!tbi_check(desc, bit55) ||
6586 tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
6587 mtedesc = 0;
6588 }
6589
6590 sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
6591 }
6592
6593 #define DO_STN_1(N, NAME, ESZ) \
6594 void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \
6595 target_ulong addr, uint32_t desc) \
6596 { \
6597 sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \
6598 sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
6599 } \
6600 void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \
6601 target_ulong addr, uint32_t desc) \
6602 { \
6603 sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, \
6604 sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
6605 }
6606
6607 #define DO_STN_2(N, NAME, ESZ, MSZ) \
6608 void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \
6609 target_ulong addr, uint32_t desc) \
6610 { \
6611 sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
6612 sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
6613 } \
6614 void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \
6615 target_ulong addr, uint32_t desc) \
6616 { \
6617 sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
6618 sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
6619 } \
6620 void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
6621 target_ulong addr, uint32_t desc) \
6622 { \
6623 sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \
6624 sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
6625 } \
6626 void HELPER(sve_st##N##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
6627 target_ulong addr, uint32_t desc) \
6628 { \
6629 sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \
6630 sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
6631 }
6632
6633 DO_STN_1(1, bb, MO_8)
6634 DO_STN_1(1, bh, MO_16)
6635 DO_STN_1(1, bs, MO_32)
6636 DO_STN_1(1, bd, MO_64)
6637 DO_STN_1(2, bb, MO_8)
6638 DO_STN_1(3, bb, MO_8)
6639 DO_STN_1(4, bb, MO_8)
6640
6641 DO_STN_2(1, hh, MO_16, MO_16)
6642 DO_STN_2(1, hs, MO_32, MO_16)
6643 DO_STN_2(1, hd, MO_64, MO_16)
6644 DO_STN_2(2, hh, MO_16, MO_16)
6645 DO_STN_2(3, hh, MO_16, MO_16)
6646 DO_STN_2(4, hh, MO_16, MO_16)
6647
6648 DO_STN_2(1, ss, MO_32, MO_32)
6649 DO_STN_2(1, sd, MO_64, MO_32)
6650 DO_STN_2(2, ss, MO_32, MO_32)
6651 DO_STN_2(3, ss, MO_32, MO_32)
6652 DO_STN_2(4, ss, MO_32, MO_32)
6653
6654 DO_STN_2(1, dd, MO_64, MO_64)
6655 DO_STN_2(2, dd, MO_64, MO_64)
6656 DO_STN_2(3, dd, MO_64, MO_64)
6657 DO_STN_2(4, dd, MO_64, MO_64)
6658
6659 #undef DO_STN_1
6660 #undef DO_STN_2
6661
6662 /*
6663 * Loads with a vector index.
6664 */
6665
6666 /*
6667 * Load the element at @reg + @reg_ofs, sign or zero-extend as needed.
6668 */
6669 typedef target_ulong zreg_off_fn(void *reg, intptr_t reg_ofs);
6670
6671 static target_ulong off_zsu_s(void *reg, intptr_t reg_ofs)
6672 {
6673 return *(uint32_t *)(reg + H1_4(reg_ofs));
6674 }
6675
6676 static target_ulong off_zss_s(void *reg, intptr_t reg_ofs)
6677 {
6678 return *(int32_t *)(reg + H1_4(reg_ofs));
6679 }
6680
6681 static target_ulong off_zsu_d(void *reg, intptr_t reg_ofs)
6682 {
6683 return (uint32_t)*(uint64_t *)(reg + reg_ofs);
6684 }
6685
6686 static target_ulong off_zss_d(void *reg, intptr_t reg_ofs)
6687 {
6688 return (int32_t)*(uint64_t *)(reg + reg_ofs);
6689 }
6690
6691 static target_ulong off_zd_d(void *reg, intptr_t reg_ofs)
6692 {
6693 return *(uint64_t *)(reg + reg_ofs);
6694 }
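/*
 * The five extractors above cover the gather addressing forms: 32-bit
 * unsigned or signed offsets packed in 32-bit elements (off_zsu_s,
 * off_zss_s), the low 32 bits of a 64-bit element zero- or sign-extended
 * (off_zsu_d, off_zss_d), and full 64-bit offsets (off_zd_d).
 */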
6695
6696 static inline QEMU_ALWAYS_INLINE
6697 void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
6698 target_ulong base, uint32_t desc, uintptr_t retaddr,
6699 uint32_t mtedesc, int esize, int msize,
6700 zreg_off_fn *off_fn,
6701 sve_ldst1_host_fn *host_fn,
6702 sve_ldst1_tlb_fn *tlb_fn)
6703 {
6704 const int mmu_idx = cpu_mmu_index(env, false);
6705 const intptr_t reg_max = simd_oprsz(desc);
6706 const int scale = simd_data(desc);
6707 ARMVectorReg scratch;
6708 intptr_t reg_off;
6709 SVEHostPage info, info2;
6710
6711 memset(&scratch, 0, reg_max);
6712 reg_off = 0;
6713 do {
6714 uint64_t pg = vg[reg_off >> 6];
6715 do {
6716 if (likely(pg & 1)) {
6717 target_ulong addr = base + (off_fn(vm, reg_off) << scale);
6718 target_ulong in_page = -(addr | TARGET_PAGE_MASK);
6719
6720 sve_probe_page(&info, false, env, addr, 0, MMU_DATA_LOAD,
6721 mmu_idx, retaddr);
6722
6723 if (likely(in_page >= msize)) {
6724 if (unlikely(info.flags & TLB_WATCHPOINT)) {
6725 cpu_check_watchpoint(env_cpu(env), addr, msize,
6726 info.attrs, BP_MEM_READ, retaddr);
6727 }
6728 if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
6729 mte_check(env, mtedesc, addr, retaddr);
6730 }
6731 host_fn(&scratch, reg_off, info.host);
6732 } else {
6733 /* Element crosses the page boundary. */
6734 sve_probe_page(&info2, false, env, addr + in_page, 0,
6735 MMU_DATA_LOAD, mmu_idx, retaddr);
6736 if (unlikely((info.flags | info2.flags) & TLB_WATCHPOINT)) {
6737 cpu_check_watchpoint(env_cpu(env), addr,
6738 msize, info.attrs,
6739 BP_MEM_READ, retaddr);
6740 }
6741 if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
6742 mte_check(env, mtedesc, addr, retaddr);
6743 }
6744 tlb_fn(env, &scratch, reg_off, addr, retaddr);
6745 }
6746 }
6747 reg_off += esize;
6748 pg >>= esize;
6749 } while (reg_off & 63);
6750 } while (reg_off < reg_max);
6751
6752 /* Wait until all exceptions have been raised to write back. */
6753 memcpy(vd, &scratch, reg_max);
6754 }
6755
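/*
 * The loop above is the predicate-walk pattern used by all of the
 * vector-index helpers in this file: one 64-bit predicate word covers
 * 64 bytes of the vector, with one governing bit per esize bytes.
 * A sketch of just that iteration, with a hypothetical visit_element()
 * standing in for the real per-element work (illustration only):
 *
 *     intptr_t reg_off = 0;
 *     do {
 *         uint64_t pg = vg[reg_off >> 6];
 *         do {
 *             if (pg & 1) {
 *                 visit_element(reg_off);   // hypothetical callback
 *             }
 *             reg_off += esize;             // element size in bytes
 *             pg >>= esize;                 // one predicate bit per byte
 *         } while (reg_off & 63);
 *     } while (reg_off < reg_max);
 */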
6756 static inline QEMU_ALWAYS_INLINE
6757 void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
6758 target_ulong base, uint32_t desc, uintptr_t retaddr,
6759 int esize, int msize, zreg_off_fn *off_fn,
6760 sve_ldst1_host_fn *host_fn,
6761 sve_ldst1_tlb_fn *tlb_fn)
6762 {
6763 uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6764 /* Remove mtedesc from the normal sve descriptor. */
6765 desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6766
6767 /*
6768 * TODO: For the 32-bit offset extractions, base + ofs cannot move
6769 * base across the address space hole, and so cannot change the
6770 * pointer tag or the bit55 selector. We could therefore examine
6771 * TBI + TCMA here, as we do for sve_ldN_r_mte().
6772 */
6773 sve_ld1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
6774 esize, msize, off_fn, host_fn, tlb_fn);
6775 }
6776
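/*
 * A sketch of the descriptor split used by the _mte wrappers, with a
 * made-up shift value purely to show the bit manipulation; the real
 * value of SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT comes from the helper
 * headers:
 *
 *     // assume SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT == 22 for this example
 *     uint32_t mtedesc = desc >> 22;             // upper bits: MTE descriptor
 *     uint32_t svedesc = extract32(desc, 0, 22); // lower bits: normal sve desc
 */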
6777 #define DO_LD1_ZPZ_S(MEM, OFS, MSZ) \
6778 void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
6779 void *vm, target_ulong base, uint32_t desc) \
6780 { \
6781 sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \
6782 off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
6783 } \
6784 void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
6785 void *vm, target_ulong base, uint32_t desc) \
6786 { \
6787 sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
6788 off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
6789 }
6790
6791 #define DO_LD1_ZPZ_D(MEM, OFS, MSZ) \
6792 void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
6793 void *vm, target_ulong base, uint32_t desc) \
6794 { \
6795 sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \
6796 off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
6797 } \
6798 void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
6799 void *vm, target_ulong base, uint32_t desc) \
6800 { \
6801 sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
6802 off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
6803 }
6804
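/*
 * For reference, one of the instantiations below, DO_LD1_ZPZ_S(bsu, zsu, MO_8),
 * expands to roughly (reformatted):
 *
 *     void HELPER(sve_ldbsu_zsu)(CPUARMState *env, void *vd, void *vg,
 *                                void *vm, target_ulong base, uint32_t desc)
 *     {
 *         sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MO_8,
 *                   off_zsu_s, sve_ld1bsu_host, sve_ld1bsu_tlb);
 *     }
 *
 * i.e. 32-bit destination elements (esize 4), 1-byte memory accesses,
 * unsigned 32-bit offsets, plus a matching _mte variant.
 */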
6805 DO_LD1_ZPZ_S(bsu, zsu, MO_8)
6806 DO_LD1_ZPZ_S(bsu, zss, MO_8)
6807 DO_LD1_ZPZ_D(bdu, zsu, MO_8)
6808 DO_LD1_ZPZ_D(bdu, zss, MO_8)
6809 DO_LD1_ZPZ_D(bdu, zd, MO_8)
6810
6811 DO_LD1_ZPZ_S(bss, zsu, MO_8)
6812 DO_LD1_ZPZ_S(bss, zss, MO_8)
6813 DO_LD1_ZPZ_D(bds, zsu, MO_8)
6814 DO_LD1_ZPZ_D(bds, zss, MO_8)
6815 DO_LD1_ZPZ_D(bds, zd, MO_8)
6816
6817 DO_LD1_ZPZ_S(hsu_le, zsu, MO_16)
6818 DO_LD1_ZPZ_S(hsu_le, zss, MO_16)
6819 DO_LD1_ZPZ_D(hdu_le, zsu, MO_16)
6820 DO_LD1_ZPZ_D(hdu_le, zss, MO_16)
6821 DO_LD1_ZPZ_D(hdu_le, zd, MO_16)
6822
6823 DO_LD1_ZPZ_S(hsu_be, zsu, MO_16)
6824 DO_LD1_ZPZ_S(hsu_be, zss, MO_16)
6825 DO_LD1_ZPZ_D(hdu_be, zsu, MO_16)
6826 DO_LD1_ZPZ_D(hdu_be, zss, MO_16)
6827 DO_LD1_ZPZ_D(hdu_be, zd, MO_16)
6828
6829 DO_LD1_ZPZ_S(hss_le, zsu, MO_16)
6830 DO_LD1_ZPZ_S(hss_le, zss, MO_16)
6831 DO_LD1_ZPZ_D(hds_le, zsu, MO_16)
6832 DO_LD1_ZPZ_D(hds_le, zss, MO_16)
6833 DO_LD1_ZPZ_D(hds_le, zd, MO_16)
6834
6835 DO_LD1_ZPZ_S(hss_be, zsu, MO_16)
6836 DO_LD1_ZPZ_S(hss_be, zss, MO_16)
6837 DO_LD1_ZPZ_D(hds_be, zsu, MO_16)
6838 DO_LD1_ZPZ_D(hds_be, zss, MO_16)
6839 DO_LD1_ZPZ_D(hds_be, zd, MO_16)
6840
6841 DO_LD1_ZPZ_S(ss_le, zsu, MO_32)
6842 DO_LD1_ZPZ_S(ss_le, zss, MO_32)
6843 DO_LD1_ZPZ_D(sdu_le, zsu, MO_32)
6844 DO_LD1_ZPZ_D(sdu_le, zss, MO_32)
6845 DO_LD1_ZPZ_D(sdu_le, zd, MO_32)
6846
6847 DO_LD1_ZPZ_S(ss_be, zsu, MO_32)
6848 DO_LD1_ZPZ_S(ss_be, zss, MO_32)
6849 DO_LD1_ZPZ_D(sdu_be, zsu, MO_32)
6850 DO_LD1_ZPZ_D(sdu_be, zss, MO_32)
6851 DO_LD1_ZPZ_D(sdu_be, zd, MO_32)
6852
6853 DO_LD1_ZPZ_D(sds_le, zsu, MO_32)
6854 DO_LD1_ZPZ_D(sds_le, zss, MO_32)
6855 DO_LD1_ZPZ_D(sds_le, zd, MO_32)
6856
6857 DO_LD1_ZPZ_D(sds_be, zsu, MO_32)
6858 DO_LD1_ZPZ_D(sds_be, zss, MO_32)
6859 DO_LD1_ZPZ_D(sds_be, zd, MO_32)
6860
6861 DO_LD1_ZPZ_D(dd_le, zsu, MO_64)
6862 DO_LD1_ZPZ_D(dd_le, zss, MO_64)
6863 DO_LD1_ZPZ_D(dd_le, zd, MO_64)
6864
6865 DO_LD1_ZPZ_D(dd_be, zsu, MO_64)
6866 DO_LD1_ZPZ_D(dd_be, zss, MO_64)
6867 DO_LD1_ZPZ_D(dd_be, zd, MO_64)
6868
6869 #undef DO_LD1_ZPZ_S
6870 #undef DO_LD1_ZPZ_D
6871
6872 /* First fault loads with a vector index. */
6873
6874 /*
6875 * Common helpers for all gather first-faulting loads.
6876 */
6877
6878 static inline QEMU_ALWAYS_INLINE
6879 void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
6880 target_ulong base, uint32_t desc, uintptr_t retaddr,
6881 uint32_t mtedesc, const int esz, const int msz,
6882 zreg_off_fn *off_fn,
6883 sve_ldst1_host_fn *host_fn,
6884 sve_ldst1_tlb_fn *tlb_fn)
6885 {
6886 const int mmu_idx = cpu_mmu_index(env, false);
6887 const intptr_t reg_max = simd_oprsz(desc);
6888 const int scale = simd_data(desc);
6889 const int esize = 1 << esz;
6890 const int msize = 1 << msz;
6891 intptr_t reg_off;
6892 SVEHostPage info;
6893 target_ulong addr, in_page;
6894
6895 /* Skip to the first true predicate. */
6896 reg_off = find_next_active(vg, 0, reg_max, esz);
6897 if (unlikely(reg_off >= reg_max)) {
6898 /* The entire predicate was false; no load occurs. */
6899 memset(vd, 0, reg_max);
6900 return;
6901 }
6902
6903 /*
6904 * Probe the first element, allowing faults.
6905 */
6906 addr = base + (off_fn(vm, reg_off) << scale);
6907 if (mtedesc) {
6908 mte_check(env, mtedesc, addr, retaddr);
6909 }
6910 tlb_fn(env, vd, reg_off, addr, retaddr);
6911
6912 /* After any fault, zero the other elements. */
6913 swap_memzero(vd, reg_off);
6914 reg_off += esize;
6915 swap_memzero(vd + reg_off, reg_max - reg_off);
6916
6917 /*
6918 * Probe the remaining elements, not allowing faults.
6919 */
6920 while (reg_off < reg_max) {
6921 uint64_t pg = vg[reg_off >> 6];
6922 do {
6923 if (likely((pg >> (reg_off & 63)) & 1)) {
6924 addr = base + (off_fn(vm, reg_off) << scale);
6925 in_page = -(addr | TARGET_PAGE_MASK);
6926
6927 if (unlikely(in_page < msize)) {
6928 /* Stop if the element crosses a page boundary. */
6929 goto fault;
6930 }
6931
6932 sve_probe_page(&info, true, env, addr, 0, MMU_DATA_LOAD,
6933 mmu_idx, retaddr);
6934 if (unlikely(info.flags & (TLB_INVALID_MASK | TLB_MMIO))) {
6935 goto fault;
6936 }
6937 if (unlikely(info.flags & TLB_WATCHPOINT) &&
6938 (cpu_watchpoint_address_matches
6939 (env_cpu(env), addr, msize) & BP_MEM_READ)) {
6940 goto fault;
6941 }
6942 if (mtedesc &&
6943 arm_tlb_mte_tagged(&info.attrs) &&
6944 !mte_probe(env, mtedesc, addr)) {
6945 goto fault;
6946 }
6947
6948 host_fn(vd, reg_off, info.host);
6949 }
6950 reg_off += esize;
6951 } while (reg_off & 63);
6952 }
6953 return;
6954
6955 fault:
6956 record_fault(env, reg_off, reg_max);
6957 }
6958
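/*
 * First-fault gathers in miniature: only the first active element may
 * raise an exception; any problem on a later element merely truncates
 * the result via record_fault().  A scalar sketch of that control flow,
 * with hypothetical probe/load functions standing in for the real ones
 * above (illustration only):
 *
 *     load_faulting(first_active);            // may raise an exception
 *     for (e = next_active(first_active); e < num; e = next_active(e)) {
 *         if (!probe_nonfaulting(e)) {        // would fault, MMIO, watchpoint, MTE
 *             record_partial(e);              // analogous to record_fault()
 *             break;
 *         }
 *         load_from_host(e);
 *     }
 */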
6959 static inline QEMU_ALWAYS_INLINE
6960 void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
6961 target_ulong base, uint32_t desc, uintptr_t retaddr,
6962 const int esz, const int msz,
6963 zreg_off_fn *off_fn,
6964 sve_ldst1_host_fn *host_fn,
6965 sve_ldst1_tlb_fn *tlb_fn)
6966 {
6967 uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6968 /* Remove mtedesc from the normal sve descriptor. */
6969 desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
6970
6971 /*
6972 * TODO: For the 32-bit offset extractions, base + ofs cannot move
6973 * base across the address space hole, and so cannot change the
6974 * pointer tag or the bit55 selector. We could therefore examine
6975 * TBI + TCMA here, as we do for sve_ldN_r_mte().
6976 */
6977 sve_ldff1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
6978 esz, msz, off_fn, host_fn, tlb_fn);
6979 }
6980
6981 #define DO_LDFF1_ZPZ_S(MEM, OFS, MSZ) \
6982 void HELPER(sve_ldff##MEM##_##OFS) \
6983 (CPUARMState *env, void *vd, void *vg, \
6984 void *vm, target_ulong base, uint32_t desc) \
6985 { \
6986 sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_32, MSZ, \
6987 off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
6988 } \
6989 void HELPER(sve_ldff##MEM##_##OFS##_mte) \
6990 (CPUARMState *env, void *vd, void *vg, \
6991 void *vm, target_ulong base, uint32_t desc) \
6992 { \
6993 sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_32, MSZ, \
6994 off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
6995 }
6996
6997 #define DO_LDFF1_ZPZ_D(MEM, OFS, MSZ) \
6998 void HELPER(sve_ldff##MEM##_##OFS) \
6999 (CPUARMState *env, void *vd, void *vg, \
7000 void *vm, target_ulong base, uint32_t desc) \
7001 { \
7002 sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_64, MSZ, \
7003 off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
7004 } \
7005 void HELPER(sve_ldff##MEM##_##OFS##_mte) \
7006 (CPUARMState *env, void *vd, void *vg, \
7007 void *vm, target_ulong base, uint32_t desc) \
7008 { \
7009 sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_64, MSZ, \
7010 off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
7011 }
7012
7013 DO_LDFF1_ZPZ_S(bsu, zsu, MO_8)
7014 DO_LDFF1_ZPZ_S(bsu, zss, MO_8)
7015 DO_LDFF1_ZPZ_D(bdu, zsu, MO_8)
7016 DO_LDFF1_ZPZ_D(bdu, zss, MO_8)
7017 DO_LDFF1_ZPZ_D(bdu, zd, MO_8)
7018
7019 DO_LDFF1_ZPZ_S(bss, zsu, MO_8)
7020 DO_LDFF1_ZPZ_S(bss, zss, MO_8)
7021 DO_LDFF1_ZPZ_D(bds, zsu, MO_8)
7022 DO_LDFF1_ZPZ_D(bds, zss, MO_8)
7023 DO_LDFF1_ZPZ_D(bds, zd, MO_8)
7024
7025 DO_LDFF1_ZPZ_S(hsu_le, zsu, MO_16)
7026 DO_LDFF1_ZPZ_S(hsu_le, zss, MO_16)
7027 DO_LDFF1_ZPZ_D(hdu_le, zsu, MO_16)
7028 DO_LDFF1_ZPZ_D(hdu_le, zss, MO_16)
7029 DO_LDFF1_ZPZ_D(hdu_le, zd, MO_16)
7030
7031 DO_LDFF1_ZPZ_S(hsu_be, zsu, MO_16)
7032 DO_LDFF1_ZPZ_S(hsu_be, zss, MO_16)
7033 DO_LDFF1_ZPZ_D(hdu_be, zsu, MO_16)
7034 DO_LDFF1_ZPZ_D(hdu_be, zss, MO_16)
7035 DO_LDFF1_ZPZ_D(hdu_be, zd, MO_16)
7036
7037 DO_LDFF1_ZPZ_S(hss_le, zsu, MO_16)
7038 DO_LDFF1_ZPZ_S(hss_le, zss, MO_16)
7039 DO_LDFF1_ZPZ_D(hds_le, zsu, MO_16)
7040 DO_LDFF1_ZPZ_D(hds_le, zss, MO_16)
7041 DO_LDFF1_ZPZ_D(hds_le, zd, MO_16)
7042
7043 DO_LDFF1_ZPZ_S(hss_be, zsu, MO_16)
7044 DO_LDFF1_ZPZ_S(hss_be, zss, MO_16)
7045 DO_LDFF1_ZPZ_D(hds_be, zsu, MO_16)
7046 DO_LDFF1_ZPZ_D(hds_be, zss, MO_16)
7047 DO_LDFF1_ZPZ_D(hds_be, zd, MO_16)
7048
7049 DO_LDFF1_ZPZ_S(ss_le, zsu, MO_32)
7050 DO_LDFF1_ZPZ_S(ss_le, zss, MO_32)
7051 DO_LDFF1_ZPZ_D(sdu_le, zsu, MO_32)
7052 DO_LDFF1_ZPZ_D(sdu_le, zss, MO_32)
7053 DO_LDFF1_ZPZ_D(sdu_le, zd, MO_32)
7054
7055 DO_LDFF1_ZPZ_S(ss_be, zsu, MO_32)
7056 DO_LDFF1_ZPZ_S(ss_be, zss, MO_32)
7057 DO_LDFF1_ZPZ_D(sdu_be, zsu, MO_32)
7058 DO_LDFF1_ZPZ_D(sdu_be, zss, MO_32)
7059 DO_LDFF1_ZPZ_D(sdu_be, zd, MO_32)
7060
7061 DO_LDFF1_ZPZ_D(sds_le, zsu, MO_32)
7062 DO_LDFF1_ZPZ_D(sds_le, zss, MO_32)
7063 DO_LDFF1_ZPZ_D(sds_le, zd, MO_32)
7064
7065 DO_LDFF1_ZPZ_D(sds_be, zsu, MO_32)
7066 DO_LDFF1_ZPZ_D(sds_be, zss, MO_32)
7067 DO_LDFF1_ZPZ_D(sds_be, zd, MO_32)
7068
7069 DO_LDFF1_ZPZ_D(dd_le, zsu, MO_64)
7070 DO_LDFF1_ZPZ_D(dd_le, zss, MO_64)
7071 DO_LDFF1_ZPZ_D(dd_le, zd, MO_64)
7072
7073 DO_LDFF1_ZPZ_D(dd_be, zsu, MO_64)
7074 DO_LDFF1_ZPZ_D(dd_be, zss, MO_64)
7075 DO_LDFF1_ZPZ_D(dd_be, zd, MO_64)
7076
7077 /* Stores with a vector index. */
7078
7079 static inline QEMU_ALWAYS_INLINE
7080 void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
7081 target_ulong base, uint32_t desc, uintptr_t retaddr,
7082 uint32_t mtedesc, int esize, int msize,
7083 zreg_off_fn *off_fn,
7084 sve_ldst1_host_fn *host_fn,
7085 sve_ldst1_tlb_fn *tlb_fn)
7086 {
7087 const int mmu_idx = cpu_mmu_index(env, false);
7088 const intptr_t reg_max = simd_oprsz(desc);
7089 const int scale = simd_data(desc);
7090 void *host[ARM_MAX_VQ * 4];
7091 intptr_t reg_off, i;
7092 SVEHostPage info, info2;
7093
7094 /*
7095 * Probe all of the elements for host addresses and flags.
7096 */
7097 i = reg_off = 0;
7098 do {
7099 uint64_t pg = vg[reg_off >> 6];
7100 do {
7101 target_ulong addr = base + (off_fn(vm, reg_off) << scale);
7102 target_ulong in_page = -(addr | TARGET_PAGE_MASK);
7103
7104 host[i] = NULL;
7105 if (likely((pg >> (reg_off & 63)) & 1)) {
7106 if (likely(in_page >= msize)) {
7107 sve_probe_page(&info, false, env, addr, 0, MMU_DATA_STORE,
7108 mmu_idx, retaddr);
7109 host[i] = info.host;
7110 } else {
7111 /*
7112 * Element crosses the page boundary.
7113 * Probe both pages, but do not record the host address,
7114 * so that we use the slow path.
7115 */
7116 sve_probe_page(&info, false, env, addr, 0,
7117 MMU_DATA_STORE, mmu_idx, retaddr);
7118 sve_probe_page(&info2, false, env, addr + in_page, 0,
7119 MMU_DATA_STORE, mmu_idx, retaddr);
7120 info.flags |= info2.flags;
7121 }
7122
7123 if (unlikely(info.flags & TLB_WATCHPOINT)) {
7124 cpu_check_watchpoint(env_cpu(env), addr, msize,
7125 info.attrs, BP_MEM_WRITE, retaddr);
7126 }
7127
7128 if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
7129 mte_check(env, mtedesc, addr, retaddr);
7130 }
7131 }
7132 i += 1;
7133 reg_off += esize;
7134 } while (reg_off & 63);
7135 } while (reg_off < reg_max);
7136
7137 /*
7138 * Now that we have recognized all exceptions except SyncExternal
7139 * (from TLB_MMIO), which we cannot avoid, perform all of the stores.
7140 *
7141 * Note for the common case of an element in RAM, not crossing a page
7142 * boundary, we have stored the host address in host[]. This doubles
7143 * as a first-level check against the predicate, since only enabled
7144 * elements have non-null host addresses.
7145 */
7146 i = reg_off = 0;
7147 do {
7148 void *h = host[i];
7149 if (likely(h != NULL)) {
7150 host_fn(vd, reg_off, h);
7151 } else if ((vg[reg_off >> 6] >> (reg_off & 63)) & 1) {
7152 target_ulong addr = base + (off_fn(vm, reg_off) << scale);
7153 tlb_fn(env, vd, reg_off, addr, retaddr);
7154 }
7155 i += 1;
7156 reg_off += esize;
7157 } while (reg_off < reg_max);
7158 }
7159
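/*
 * The scatter store above is two-pass: pass 1 probes every active
 * element so that all recognizable exceptions (MMU, watchpoint, MTE)
 * are raised before any memory is modified; pass 2 performs the stores.
 * A compressed sketch of the shape, with hypothetical helpers
 * (illustration only):
 *
 *     for_each_active(e) {
 *         host[e] = probe_for_store(e);   // NULL forces the slow path
 *     }
 *     for_each_active(e) {
 *         if (host[e]) {
 *             store_direct(e, host[e]);   // in-page RAM, fast path
 *         } else {
 *             store_via_tlb(e);           // page-crossing or MMIO
 *         }
 *     }
 */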
7160 static inline QEMU_ALWAYS_INLINE
7161 void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
7162 target_ulong base, uint32_t desc, uintptr_t retaddr,
7163 int esize, int msize, zreg_off_fn *off_fn,
7164 sve_ldst1_host_fn *host_fn,
7165 sve_ldst1_tlb_fn *tlb_fn)
7166 {
7167 uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
7168 /* Remove mtedesc from the normal sve descriptor. */
7169 desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
7170
7171 /*
7172 * TODO: For the 32-bit offset extractions, base + ofs cannot move
7173 * base across the address space hole, and so cannot change the
7174 * pointer tag or the bit55 selector. We could therefore examine
7175 * TBI + TCMA here, as we do for sve_ldN_r_mte().
7176 */
7177 sve_st1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
7178 esize, msize, off_fn, host_fn, tlb_fn);
7179 }
7180
7181 #define DO_ST1_ZPZ_S(MEM, OFS, MSZ) \
7182 void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
7183 void *vm, target_ulong base, uint32_t desc) \
7184 { \
7185 sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \
7186 off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
7187 } \
7188 void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
7189 void *vm, target_ulong base, uint32_t desc) \
7190 { \
7191 sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
7192 off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
7193 }
7194
7195 #define DO_ST1_ZPZ_D(MEM, OFS, MSZ) \
7196 void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
7197 void *vm, target_ulong base, uint32_t desc) \
7198 { \
7199 sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \
7200 off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
7201 } \
7202 void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
7203 void *vm, target_ulong base, uint32_t desc) \
7204 { \
7205 sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
7206 off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
7207 }
7208
7209 DO_ST1_ZPZ_S(bs, zsu, MO_8)
7210 DO_ST1_ZPZ_S(hs_le, zsu, MO_16)
7211 DO_ST1_ZPZ_S(hs_be, zsu, MO_16)
7212 DO_ST1_ZPZ_S(ss_le, zsu, MO_32)
7213 DO_ST1_ZPZ_S(ss_be, zsu, MO_32)
7214
7215 DO_ST1_ZPZ_S(bs, zss, MO_8)
7216 DO_ST1_ZPZ_S(hs_le, zss, MO_16)
7217 DO_ST1_ZPZ_S(hs_be, zss, MO_16)
7218 DO_ST1_ZPZ_S(ss_le, zss, MO_32)
7219 DO_ST1_ZPZ_S(ss_be, zss, MO_32)
7220
7221 DO_ST1_ZPZ_D(bd, zsu, MO_8)
7222 DO_ST1_ZPZ_D(hd_le, zsu, MO_16)
7223 DO_ST1_ZPZ_D(hd_be, zsu, MO_16)
7224 DO_ST1_ZPZ_D(sd_le, zsu, MO_32)
7225 DO_ST1_ZPZ_D(sd_be, zsu, MO_32)
7226 DO_ST1_ZPZ_D(dd_le, zsu, MO_64)
7227 DO_ST1_ZPZ_D(dd_be, zsu, MO_64)
7228
7229 DO_ST1_ZPZ_D(bd, zss, MO_8)
7230 DO_ST1_ZPZ_D(hd_le, zss, MO_16)
7231 DO_ST1_ZPZ_D(hd_be, zss, MO_16)
7232 DO_ST1_ZPZ_D(sd_le, zss, MO_32)
7233 DO_ST1_ZPZ_D(sd_be, zss, MO_32)
7234 DO_ST1_ZPZ_D(dd_le, zss, MO_64)
7235 DO_ST1_ZPZ_D(dd_be, zss, MO_64)
7236
7237 DO_ST1_ZPZ_D(bd, zd, MO_8)
7238 DO_ST1_ZPZ_D(hd_le, zd, MO_16)
7239 DO_ST1_ZPZ_D(hd_be, zd, MO_16)
7240 DO_ST1_ZPZ_D(sd_le, zd, MO_32)
7241 DO_ST1_ZPZ_D(sd_be, zd, MO_32)
7242 DO_ST1_ZPZ_D(dd_le, zd, MO_64)
7243 DO_ST1_ZPZ_D(dd_be, zd, MO_64)
7244
7245 #undef DO_ST1_ZPZ_S
7246 #undef DO_ST1_ZPZ_D
7247
7248 void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
7249 {
7250 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7251 uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
7252
7253 for (i = 0; i < opr_sz; ++i) {
7254 d[i] = n[i] ^ m[i] ^ k[i];
7255 }
7256 }
7257
7258 void HELPER(sve2_bcax)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
7259 {
7260 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7261 uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
7262
7263 for (i = 0; i < opr_sz; ++i) {
7264 d[i] = n[i] ^ (m[i] & ~k[i]);
7265 }
7266 }
7267
7268 void HELPER(sve2_bsl1n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
7269 {
7270 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7271 uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
7272
7273 for (i = 0; i < opr_sz; ++i) {
7274 d[i] = (~n[i] & k[i]) | (m[i] & ~k[i]);
7275 }
7276 }
7277
7278 void HELPER(sve2_bsl2n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
7279 {
7280 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7281 uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
7282
7283 for (i = 0; i < opr_sz; ++i) {
7284 d[i] = (n[i] & k[i]) | (~m[i] & ~k[i]);
7285 }
7286 }
7287
7288 void HELPER(sve2_nbsl)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
7289 {
7290 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7291 uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
7292
7293 for (i = 0; i < opr_sz; ++i) {
7294 d[i] = ~((n[i] & k[i]) | (m[i] & ~k[i]));
7295 }
7296 }
7297
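/*
 * Taken together, the five helpers above are the SVE2 bitwise ternary
 * operations, with k acting as the select mask.  Per bit, in terms of
 * the C expressions used above:
 *
 *     EOR3  : n ^ m ^ k
 *     BCAX  : n ^ (m & ~k)
 *     BSL1N : (~n & k) | (m & ~k)      BSL with the first input inverted
 *     BSL2N : ( n & k) | (~m & ~k)     BSL with the second input inverted
 *     NBSL  : ~((n & k) | (m & ~k))    BSL with the result inverted
 */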
7298 /*
7299 * Returns true if m0 or m1 contains a copy of the low uint8_t/uint16_t of n.
7300 * See hasless(v,1) from
7301 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
7302 */
7303 static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
7304 {
7305 int bits = 8 << esz;
7306 uint64_t ones = dup_const(esz, 1);
7307 uint64_t signs = ones << (bits - 1);
7308 uint64_t cmp0, cmp1;
7309
7310 cmp1 = dup_const(esz, n);
7311 cmp0 = cmp1 ^ m0;
7312 cmp1 = cmp1 ^ m1;
7313 cmp0 = (cmp0 - ones) & ~cmp0;
7314 cmp1 = (cmp1 - ones) & ~cmp1;
7315 return (cmp0 | cmp1) & signs;
7316 }
7317
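/*
 * The core of do_match2() is the classic "is any byte zero" bit hack:
 * (x - 0x01..01) & ~x & 0x80..80 is nonzero iff some byte of x is zero,
 * and XORing with a replicated n first turns "byte equals n" into
 * "byte is zero".  A standalone byte-sized sketch, assuming uint64_t
 * lanes (illustration only, not used by the helpers):
 *
 *     static bool any_byte_equals(uint64_t v, uint8_t n)
 *     {
 *         uint64_t ones  = 0x0101010101010101ull;
 *         uint64_t signs = 0x8080808080808080ull;
 *         uint64_t x = v ^ (ones * n);    // matching bytes become zero
 *         return ((x - ones) & ~x & signs) != 0;
 *     }
 */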
7318 static inline uint32_t do_match(void *vd, void *vn, void *vm, void *vg,
7319 uint32_t desc, int esz, bool nmatch)
7320 {
7321 uint16_t esz_mask = pred_esz_masks[esz];
7322 intptr_t opr_sz = simd_oprsz(desc);
7323 uint32_t flags = PREDTEST_INIT;
7324 intptr_t i, j, k;
7325
7326 for (i = 0; i < opr_sz; i += 16) {
7327 uint64_t m0 = *(uint64_t *)(vm + i);
7328 uint64_t m1 = *(uint64_t *)(vm + i + 8);
7329 uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)) & esz_mask;
7330 uint16_t out = 0;
7331
7332 for (j = 0; j < 16; j += 8) {
7333 uint64_t n = *(uint64_t *)(vn + i + j);
7334
7335 for (k = 0; k < 8; k += 1 << esz) {
7336 if (pg & (1 << (j + k))) {
7337 bool o = do_match2(n >> (k * 8), m0, m1, esz);
7338 out |= (o ^ nmatch) << (j + k);
7339 }
7340 }
7341 }
7342 *(uint16_t *)(vd + H1_2(i >> 3)) = out;
7343 flags = iter_predtest_fwd(out, pg, flags);
7344 }
7345 return flags;
7346 }
7347
7348 #define DO_PPZZ_MATCH(NAME, ESZ, INV) \
7349 uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
7350 { \
7351 return do_match(vd, vn, vm, vg, desc, ESZ, INV); \
7352 }
7353
7354 DO_PPZZ_MATCH(sve2_match_ppzz_b, MO_8, false)
7355 DO_PPZZ_MATCH(sve2_match_ppzz_h, MO_16, false)
7356
7357 DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true)
7358 DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true)
7359
7360 #undef DO_PPZZ_MATCH
7361
7362 void HELPER(sve2_histcnt_s)(void *vd, void *vn, void *vm, void *vg,
7363 uint32_t desc)
7364 {
7365 ARMVectorReg scratch;
7366 intptr_t i, j;
7367 intptr_t opr_sz = simd_oprsz(desc);
7368 uint32_t *d = vd, *n = vn, *m = vm;
7369 uint8_t *pg = vg;
7370
7371 if (d == n) {
7372 n = memcpy(&scratch, n, opr_sz);
7373 if (d == m) {
7374 m = n;
7375 }
7376 } else if (d == m) {
7377 m = memcpy(&scratch, m, opr_sz);
7378 }
7379
7380 for (i = 0; i < opr_sz; i += 4) {
7381 uint64_t count = 0;
7382 uint8_t pred;
7383
7384 pred = pg[H1(i >> 3)] >> (i & 7);
7385 if (pred & 1) {
7386 uint32_t nn = n[H4(i >> 2)];
7387
7388 for (j = 0; j <= i; j += 4) {
7389 pred = pg[H1(j >> 3)] >> (j & 7);
7390 if ((pred & 1) && nn == m[H4(j >> 2)]) {
7391 ++count;
7392 }
7393 }
7394 }
7395 d[H4(i >> 2)] = count;
7396 }
7397 }
7398
7399 void HELPER(sve2_histcnt_d)(void *vd, void *vn, void *vm, void *vg,
7400 uint32_t desc)
7401 {
7402 ARMVectorReg scratch;
7403 intptr_t i, j;
7404 intptr_t opr_sz = simd_oprsz(desc);
7405 uint64_t *d = vd, *n = vn, *m = vm;
7406 uint8_t *pg = vg;
7407
7408 if (d == n) {
7409 n = memcpy(&scratch, n, opr_sz);
7410 if (d == m) {
7411 m = n;
7412 }
7413 } else if (d == m) {
7414 m = memcpy(&scratch, m, opr_sz);
7415 }
7416
7417 for (i = 0; i < opr_sz / 8; ++i) {
7418 uint64_t count = 0;
7419 if (pg[H1(i)] & 1) {
7420 uint64_t nn = n[i];
7421 for (j = 0; j <= i; ++j) {
7422 if ((pg[H1(j)] & 1) && nn == m[j]) {
7423 ++count;
7424 }
7425 }
7426 }
7427 d[i] = count;
7428 }
7429 }
7430
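/*
 * HISTCNT semantics, common to both helpers above: for each active
 * element i, d[i] is the number of active elements j <= i for which
 * n[i] == m[j]; inactive elements produce 0.  A worked example with
 * Zn == Zm and an all-true predicate (element values low to high):
 *
 *     n = m = { 3, 7, 3, 3 }
 *     d     = { 1, 1, 2, 3 }
 *
 * i.e. a running count of how many times each value has occurred so far.
 */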
7431 /*
7432 * Returns the number of bytes in m0 and m1 that match n.
7433 * Unlike do_match2, we need an exact count rather than just true/false.
7434 * This requires two extra logical operations.
7435 */
7436 static inline uint64_t do_histseg_cnt(uint8_t n, uint64_t m0, uint64_t m1)
7437 {
7438 const uint64_t mask = dup_const(MO_8, 0x7f);
7439 uint64_t cmp0, cmp1;
7440
7441 cmp1 = dup_const(MO_8, n);
7442 cmp0 = cmp1 ^ m0;
7443 cmp1 = cmp1 ^ m1;
7444
7445 /*
7446 * 1: clear msb of each byte to avoid carry to next byte (& mask)
7447 * 2: carry in to msb if byte != 0 (+ mask)
7448 * 3: set msb if cmp has msb set (| cmp)
7449 * 4: set ~msb to ignore them (| mask)
7450 * We now have 0xff for byte != 0 or 0x7f for byte == 0.
7451 * 5: invert, resulting in 0x80 if and only if byte == 0.
7452 */
7453 cmp0 = ~(((cmp0 & mask) + mask) | cmp0 | mask);
7454 cmp1 = ~(((cmp1 & mask) + mask) | cmp1 | mask);
7455
7456 /*
7457 * Combine the two compares so that their set bits do not
7458 * overlap, which preserves the total count of set bits.
7459 * If the host has an efficient instruction for ctpop,
7460 * then ctpop(x) + ctpop(y) has the same number of
7461 * operations as ctpop(x | (y >> 1)). If the host does
7462 * not have an efficient ctpop, then we only want to
7463 * use it once.
7464 */
7465 return ctpop64(cmp0 | (cmp1 >> 1));
7466 }
7467
7468 void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
7469 {
7470 intptr_t i, j;
7471 intptr_t opr_sz = simd_oprsz(desc);
7472
7473 for (i = 0; i < opr_sz; i += 16) {
7474 uint64_t n0 = *(uint64_t *)(vn + i);
7475 uint64_t m0 = *(uint64_t *)(vm + i);
7476 uint64_t n1 = *(uint64_t *)(vn + i + 8);
7477 uint64_t m1 = *(uint64_t *)(vm + i + 8);
7478 uint64_t out0 = 0;
7479 uint64_t out1 = 0;
7480
7481 for (j = 0; j < 64; j += 8) {
7482 uint64_t cnt0 = do_histseg_cnt(n0 >> j, m0, m1);
7483 uint64_t cnt1 = do_histseg_cnt(n1 >> j, m0, m1);
7484 out0 |= cnt0 << j;
7485 out1 |= cnt1 << j;
7486 }
7487
7488 *(uint64_t *)(vd + i) = out0;
7489 *(uint64_t *)(vd + i + 8) = out1;
7490 }
7491 }
7492
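/*
 * HISTSEG semantics: within each 16-byte segment, byte i of Zd receives
 * the number of bytes of Zm in the same segment that equal byte i of Zn.
 * The count is at most 16, so it always fits in a byte; e.g. if a byte
 * of Zn is 0x20 and five bytes of Zm in that segment equal 0x20, the
 * corresponding byte of Zd becomes 5.
 */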
7493 void HELPER(sve2_xar_b)(void *vd, void *vn, void *vm, uint32_t desc)
7494 {
7495 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7496 int shr = simd_data(desc);
7497 int shl = 8 - shr;
7498 uint64_t mask = dup_const(MO_8, 0xff >> shr);
7499 uint64_t *d = vd, *n = vn, *m = vm;
7500
7501 for (i = 0; i < opr_sz; ++i) {
7502 uint64_t t = n[i] ^ m[i];
7503 d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
7504 }
7505 }
7506
7507 void HELPER(sve2_xar_h)(void *vd, void *vn, void *vm, uint32_t desc)
7508 {
7509 intptr_t i, opr_sz = simd_oprsz(desc) / 8;
7510 int shr = simd_data(desc);
7511 int shl = 16 - shr;
7512 uint64_t mask = dup_const(MO_16, 0xffff >> shr);
7513 uint64_t *d = vd, *n = vn, *m = vm;
7514
7515 for (i = 0; i < opr_sz; ++i) {
7516 uint64_t t = n[i] ^ m[i];
7517 d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
7518 }
7519 }
7520
7521 void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc)
7522 {
7523 intptr_t i, opr_sz = simd_oprsz(desc) / 4;
7524 int shr = simd_data(desc);
7525 uint32_t *d = vd, *n = vn, *m = vm;
7526
7527 for (i = 0; i < opr_sz; ++i) {
7528 d[i] = ror32(n[i] ^ m[i], shr);
7529 }
7530 }
7531
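/*
 * XAR is "exclusive or, then rotate right by an immediate", per element.
 * The byte and halfword helpers above synthesize the per-lane rotate
 * with shifts and a lane mask on packed uint64_t values; for a single
 * lane the operation reduces to an ordinary rotate.  A one-lane sketch
 * for the 8-bit case (illustration only):
 *
 *     static uint8_t xar8(uint8_t n, uint8_t m, unsigned shr)  // 1 <= shr <= 8
 *     {
 *         uint8_t t = n ^ m;
 *         return (t >> shr) | (t << (8 - shr));
 *     }
 */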
7532 void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va,
7533 void *status, uint32_t desc)
7534 {
7535 intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float32) * 4);
7536
7537 for (s = 0; s < opr_sz; ++s) {
7538 float32 *n = vn + s * sizeof(float32) * 4;
7539 float32 *m = vm + s * sizeof(float32) * 4;
7540 float32 *a = va + s * sizeof(float32) * 4;
7541 float32 *d = vd + s * sizeof(float32) * 4;
7542 float32 n00 = n[H4(0)], n01 = n[H4(1)];
7543 float32 n10 = n[H4(2)], n11 = n[H4(3)];
7544 float32 m00 = m[H4(0)], m01 = m[H4(1)];
7545 float32 m10 = m[H4(2)], m11 = m[H4(3)];
7546 float32 p0, p1;
7547
7548 /* i = 0, j = 0 */
7549 p0 = float32_mul(n00, m00, status);
7550 p1 = float32_mul(n01, m01, status);
7551 d[H4(0)] = float32_add(a[H4(0)], float32_add(p0, p1, status), status);
7552
7553 /* i = 0, j = 1 */
7554 p0 = float32_mul(n00, m10, status);
7555 p1 = float32_mul(n01, m11, status);
7556 d[H4(1)] = float32_add(a[H4(1)], float32_add(p0, p1, status), status);
7557
7558 /* i = 1, j = 0 */
7559 p0 = float32_mul(n10, m00, status);
7560 p1 = float32_mul(n11, m01, status);
7561 d[H4(2)] = float32_add(a[H4(2)], float32_add(p0, p1, status), status);
7562
7563 /* i = 1, j = 1 */
7564 p0 = float32_mul(n10, m10, status);
7565 p1 = float32_mul(n11, m11, status);
7566 d[H4(3)] = float32_add(a[H4(3)], float32_add(p0, p1, status), status);
7567 }
7568 }
7569
7570 void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va,
7571 void *status, uint32_t desc)
7572 {
7573 intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float64) * 4);
7574
7575 for (s = 0; s < opr_sz; ++s) {
7576 float64 *n = vn + s * sizeof(float64) * 4;
7577 float64 *m = vm + s * sizeof(float64) * 4;
7578 float64 *a = va + s * sizeof(float64) * 4;
7579 float64 *d = vd + s * sizeof(float64) * 4;
7580 float64 n00 = n[0], n01 = n[1], n10 = n[2], n11 = n[3];
7581 float64 m00 = m[0], m01 = m[1], m10 = m[2], m11 = m[3];
7582 float64 p0, p1;
7583
7584 /* i = 0, j = 0 */
7585 p0 = float64_mul(n00, m00, status);
7586 p1 = float64_mul(n01, m01, status);
7587 d[0] = float64_add(a[0], float64_add(p0, p1, status), status);
7588
7589 /* i = 0, j = 1 */
7590 p0 = float64_mul(n00, m10, status);
7591 p1 = float64_mul(n01, m11, status);
7592 d[1] = float64_add(a[1], float64_add(p0, p1, status), status);
7593
7594 /* i = 1, j = 0 */
7595 p0 = float64_mul(n10, m00, status);
7596 p1 = float64_mul(n11, m01, status);
7597 d[2] = float64_add(a[2], float64_add(p0, p1, status), status);
7598
7599 /* i = 1, j = 1 */
7600 p0 = float64_mul(n10, m10, status);
7601 p1 = float64_mul(n11, m11, status);
7602 d[3] = float64_add(a[3], float64_add(p0, p1, status), status);
7603 }
7604 }
7605
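/*
 * Both FMMLA helpers above compute, for each 4-element segment viewed
 * as 2x2 row-major matrices N, M and accumulator A:
 *
 *     D[i][j] = A[i][j] + N[i][0] * M[j][0] + N[i][1] * M[j][1]
 *
 * i.e. D = A + N * M^T, which is exactly the four i/j cases written out
 * explicitly in the loops.
 */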
7606 #define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
7607 void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
7608 { \
7609 intptr_t i = simd_oprsz(desc); \
7610 uint64_t *g = vg; \
7611 do { \
7612 uint64_t pg = g[(i - 1) >> 6]; \
7613 do { \
7614 i -= sizeof(TYPEW); \
7615 if (likely((pg >> (i & 63)) & 1)) { \
7616 TYPEW nn = *(TYPEW *)(vn + HW(i)); \
7617 *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, status); \
7618 } \
7619 } while (i & 63); \
7620 } while (i != 0); \
7621 }
7622
7623 DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
7624 DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, , H1_4, float64_to_float32)
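
/*
 * FCVTNT converts each active wide element and writes the narrowed
 * result to the top (odd-numbered) narrow element of the same wide
 * slot, leaving the even narrow elements of Zd unchanged; hence the
 * "+ sizeof(TYPEN)" in the destination address above, with HN()
 * providing the host-endian fixup.  The loop walks from the high end
 * of the vector down, fetching one 64-byte predicate word at a time.
 */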