]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate-sve.c
target/arm: Implement SVE2 WHILERW, WHILEWR
[mirror_qemu.git] / target / arm / translate-sve.c
CommitLineData
38388f7e
RH
1/*
2 * AArch64 SVE translation
3 *
4 * Copyright (c) 2018 Linaro, Ltd
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
50f57e09 9 * version 2.1 of the License, or (at your option) any later version.
38388f7e
RH
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "exec/exec-all.h"
dcb32f1d
PMD
23#include "tcg/tcg-op.h"
24#include "tcg/tcg-op-gvec.h"
25#include "tcg/tcg-gvec-desc.h"
38388f7e
RH
26#include "qemu/log.h"
27#include "arm_ldst.h"
28#include "translate.h"
29#include "internals.h"
30#include "exec/helper-proto.h"
31#include "exec/helper-gen.h"
32#include "exec/log.h"
33#include "trace-tcg.h"
34#include "translate-a64.h"
cc48affe 35#include "fpu/softfloat.h"
38388f7e 36
757f9cff 37
9ee3a611
RH
38typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
39 TCGv_i64, uint32_t, uint32_t);
40
38cadeba
RH
41typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
42 TCGv_ptr, TCGv_i32);
757f9cff
RH
43typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
44 TCGv_ptr, TCGv_ptr, TCGv_i32);
45
c4e7c493 46typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
f6dbf62a
RH
47typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
48 TCGv_ptr, TCGv_i64, TCGv_i32);
c4e7c493 49
ccd841c3
RH
50/*
51 * Helpers for extracting complex instruction fields.
52 */
53
54/* See e.g. ASR (immediate, predicated).
55 * Returns -1 for unallocated encoding; diagnose later.
56 */
451e4ffd 57static int tszimm_esz(DisasContext *s, int x)
ccd841c3
RH
58{
59 x >>= 3; /* discard imm3 */
60 return 31 - clz32(x);
61}
62
451e4ffd 63static int tszimm_shr(DisasContext *s, int x)
ccd841c3 64{
451e4ffd 65 return (16 << tszimm_esz(s, x)) - x;
ccd841c3
RH
66}
67
68/* See e.g. LSL (immediate, predicated). */
451e4ffd 69static int tszimm_shl(DisasContext *s, int x)
ccd841c3 70{
451e4ffd 71 return x - (8 << tszimm_esz(s, x));
ccd841c3
RH
72}
73
451e4ffd 74static inline int plus1(DisasContext *s, int x)
24e82e68
RH
75{
76 return x + 1;
77}
78
f25a2361 79/* The SH bit is in bit 8. Extract the low 8 and shift. */
451e4ffd 80static inline int expand_imm_sh8s(DisasContext *s, int x)
f25a2361
RH
81{
82 return (int8_t)x << (x & 0x100 ? 8 : 0);
83}
84
451e4ffd 85static inline int expand_imm_sh8u(DisasContext *s, int x)
6e6a157d
RH
86{
87 return (uint8_t)x << (x & 0x100 ? 8 : 0);
88}
89
c4e7c493
RH
90/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
91 * with unsigned data. C.f. SVE Memory Contiguous Load Group.
92 */
451e4ffd 93static inline int msz_dtype(DisasContext *s, int msz)
c4e7c493
RH
94{
95 static const uint8_t dtype[4] = { 0, 5, 10, 15 };
96 return dtype[msz];
97}
98
38388f7e
RH
99/*
100 * Include the generated decoder.
101 */
102
139c1837 103#include "decode-sve.c.inc"
38388f7e
RH
104
105/*
106 * Implement all of the translator functions referenced by the decoder.
107 */
108
/* Return the offset into CPUARMState of the predicate vector register Pn.
 * Note for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64.
 * (One predicate bit per byte of vector length.)
 */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->sve_len >> 3;
}
122
/* Round up the size of a register to a size allowed by
 * the tcg vector infrastructure.  Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static int size_for_gvec(int size)
{
    /* Minimum operand is 8 bytes; above that, round up to 16. */
    return size <= 8 ? 8 : ((size + 15) & ~15);
}
139
140static int pred_gvec_reg_size(DisasContext *s)
141{
142 return size_for_gvec(pred_full_reg_size(s));
143}
144
40e32e5a
RH
145/* Invoke an out-of-line helper on 2 Zregs. */
146static void gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
147 int rd, int rn, int data)
148{
149 unsigned vsz = vec_full_reg_size(s);
150 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
151 vec_full_reg_offset(s, rn),
152 vsz, vsz, data, fn);
153}
154
e645d1a1
RH
155/* Invoke an out-of-line helper on 3 Zregs. */
156static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
157 int rd, int rn, int rm, int data)
158{
159 unsigned vsz = vec_full_reg_size(s);
160 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
161 vec_full_reg_offset(s, rn),
162 vec_full_reg_offset(s, rm),
163 vsz, vsz, data, fn);
164}
165
38650638
RH
166/* Invoke an out-of-line helper on 4 Zregs. */
167static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
168 int rd, int rn, int rm, int ra, int data)
169{
170 unsigned vsz = vec_full_reg_size(s);
171 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
172 vec_full_reg_offset(s, rn),
173 vec_full_reg_offset(s, rm),
174 vec_full_reg_offset(s, ra),
175 vsz, vsz, data, fn);
176}
177
96a461f7
RH
178/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
179static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
180 int rd, int rn, int pg, int data)
181{
182 unsigned vsz = vec_full_reg_size(s);
183 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
184 vec_full_reg_offset(s, rn),
185 pred_full_reg_offset(s, pg),
186 vsz, vsz, data, fn);
187}
188
36cbb7a8
RH
189/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
190static void gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
191 int rd, int rn, int rm, int pg, int data)
192{
193 unsigned vsz = vec_full_reg_size(s);
194 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
195 vec_full_reg_offset(s, rn),
196 vec_full_reg_offset(s, rm),
197 pred_full_reg_offset(s, pg),
198 vsz, vsz, data, fn);
199}
f7d79c41 200
36cbb7a8 201/* Invoke a vector expander on two Zregs. */
f7d79c41
RH
202static void gen_gvec_fn_zz(DisasContext *s, GVecGen2Fn *gvec_fn,
203 int esz, int rd, int rn)
38388f7e 204{
f7d79c41
RH
205 unsigned vsz = vec_full_reg_size(s);
206 gvec_fn(esz, vec_full_reg_offset(s, rd),
207 vec_full_reg_offset(s, rn), vsz, vsz);
38388f7e
RH
208}
209
39eea561 210/* Invoke a vector expander on three Zregs. */
28c4da31
RH
211static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
212 int esz, int rd, int rn, int rm)
38388f7e 213{
28c4da31
RH
214 unsigned vsz = vec_full_reg_size(s);
215 gvec_fn(esz, vec_full_reg_offset(s, rd),
216 vec_full_reg_offset(s, rn),
217 vec_full_reg_offset(s, rm), vsz, vsz);
38388f7e
RH
218}
219
39eea561
RH
220/* Invoke a vector move on two Zregs. */
221static bool do_mov_z(DisasContext *s, int rd, int rn)
38388f7e 222{
f7d79c41
RH
223 if (sve_access_check(s)) {
224 gen_gvec_fn_zz(s, tcg_gen_gvec_mov, MO_8, rd, rn);
225 }
226 return true;
38388f7e
RH
227}
228
d9d78dcc
RH
229/* Initialize a Zreg with replications of a 64-bit immediate. */
230static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
231{
232 unsigned vsz = vec_full_reg_size(s);
8711e71f 233 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
d9d78dcc
RH
234}
235
516e246a 236/* Invoke a vector expander on three Pregs. */
dd81a8d7
RH
237static void gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
238 int rd, int rn, int rm)
516e246a 239{
dd81a8d7
RH
240 unsigned psz = pred_gvec_reg_size(s);
241 gvec_fn(MO_64, pred_full_reg_offset(s, rd),
242 pred_full_reg_offset(s, rn),
243 pred_full_reg_offset(s, rm), psz, psz);
516e246a
RH
244}
245
246/* Invoke a vector move on two Pregs. */
247static bool do_mov_p(DisasContext *s, int rd, int rn)
248{
d0b2df5a
RH
249 if (sve_access_check(s)) {
250 unsigned psz = pred_gvec_reg_size(s);
251 tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
252 pred_full_reg_offset(s, rn), psz, psz);
253 }
254 return true;
516e246a
RH
255}
256
/* Set the cpu flags as per a return from an SVE helper.
 * NOTE(review): the layout assumed here is that the helper packs the
 * PredTest result with C in bit 0 and !Z in bit 1, with the whole value
 * usable directly as NF — confirm against the sve_predtest helpers.
 */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);     /* PredTest always clears V. */
}
265
/* Subroutines computing the ARM PredTest pseudofunction. */

/* Single-word variant: test predicate word D against governing word G. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}
275
/* Full-width PredTest over WORDS predicate words at env offsets
 * DOFS (result) and GOFS (governing predicate).
 */
static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t;

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);
    t = tcg_const_i32(words);

    /* Note 't' is both input (word count) and output (flags) here. */
    gen_helper_sve_predtest(t, dptr, gptr, t);
    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}
293
/* For each element size, the bits within a predicate word that are active.
 * Indexed by log2(esize): every bit for bytes, every 2nd bit for halfwords,
 * every 4th for words, every 8th for doublewords.
 */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};
299
39eea561
RH
300/*
301 *** SVE Logical - Unpredicated Group
302 */
303
28c4da31
RH
304static bool do_zzz_fn(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *gvec_fn)
305{
306 if (sve_access_check(s)) {
307 gen_gvec_fn_zzz(s, gvec_fn, a->esz, a->rd, a->rn, a->rm);
308 }
309 return true;
310}
311
/* Unpredicated bitwise logical ops expand directly to gvec operations. */
static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_and);
}

static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_or);
}

static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_xor);
}

static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_andc);
}
d1822297 331
/*
 *** SVE Integer Arithmetic - Unpredicated Group
 */

/* Plain and saturating add/sub all expand via generic gvec expanders. */
static bool trans_ADD_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_add);
}

static bool trans_SUB_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_sub);
}

/* Signed saturating add. */
static bool trans_SQADD_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_ssadd);
}

/* Signed saturating subtract. */
static bool trans_SQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_sssub);
}

/* Unsigned saturating add. */
static bool trans_UQADD_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_usadd);
}

/* Unsigned saturating subtract. */
static bool trans_UQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_ussub);
}
365
f97cfd59
RH
366/*
367 *** SVE Integer Arithmetic - Binary Predicated Group
368 */
369
370static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn)
371{
f97cfd59
RH
372 if (fn == NULL) {
373 return false;
374 }
375 if (sve_access_check(s)) {
36cbb7a8 376 gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0);
f97cfd59
RH
377 }
378 return true;
379}
380
/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
static void do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
    };
    gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
}
392
/* Emit a trans function for a predicated binary op NAME, dispatching on
 * element size to the four gen_helper_sve_<name>_zpzz_{b,h,s,d} helpers.
 */
#define DO_ZPZZ(NAME, name) \
static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_4 * const fns[4] = {                           \
        gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h,   \
        gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d,   \
    };                                                                    \
    return do_zpzz_ool(s, a, fns[a->esz]);                                \
}

DO_ZPZZ(AND, and)
DO_ZPZZ(EOR, eor)
DO_ZPZZ(ORR, orr)
DO_ZPZZ(BIC, bic)

DO_ZPZZ(ADD, add)
DO_ZPZZ(SUB, sub)

DO_ZPZZ(SMAX, smax)
DO_ZPZZ(UMAX, umax)
DO_ZPZZ(SMIN, smin)
DO_ZPZZ(UMIN, umin)
DO_ZPZZ(SABD, sabd)
DO_ZPZZ(UABD, uabd)

DO_ZPZZ(MUL, mul)
DO_ZPZZ(SMULH, smulh)
DO_ZPZZ(UMULH, umulh)

DO_ZPZZ(ASR, asr)
DO_ZPZZ(LSR, lsr)
DO_ZPZZ(LSL, lsl)
425
/* SDIV/UDIV exist only for word and doubleword elements;
 * the NULL entries reject byte and halfword encodings.
 */
static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    static gen_helper_gvec_4 * const fns[4] = {
        NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
    };
    return do_zpzz_ool(s, a, fns[a->esz]);
}

static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    static gen_helper_gvec_4 * const fns[4] = {
        NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
    };
    return do_zpzz_ool(s, a, fns[a->esz]);
}

/* SEL: choose per-element between Zn (active) and Zm (inactive). */
static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    if (sve_access_check(s)) {
        do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz);
    }
    return true;
}

#undef DO_ZPZZ
afac6d04
RH
452/*
453 *** SVE Integer Arithmetic - Unary Predicated Group
454 */
455
456static bool do_zpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3 *fn)
457{
458 if (fn == NULL) {
459 return false;
460 }
461 if (sve_access_check(s)) {
96a461f7 462 gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, 0);
afac6d04
RH
463 }
464 return true;
465}
466
/* Emit a trans function for a predicated unary op NAME, dispatching on
 * element size to the four gen_helper_sve_<name>_{b,h,s,d} helpers.
 */
#define DO_ZPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)           \
{                                                                   \
    static gen_helper_gvec_3 * const fns[4] = {                     \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,       \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,       \
    };                                                              \
    return do_zpz_ool(s, a, fns[a->esz]);                           \
}

DO_ZPZ(CLS, cls)
DO_ZPZ(CLZ, clz)
DO_ZPZ(CNT_zpz, cnt_zpz)
DO_ZPZ(CNOT, cnot)
DO_ZPZ(NOT_zpz, not_zpz)
DO_ZPZ(ABS, abs)
DO_ZPZ(NEG, neg)
/* FABS/FNEG have no byte form: the NULL entry rejects esz == 0. */
static bool trans_FABS(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_fabs_h,
        gen_helper_sve_fabs_s,
        gen_helper_sve_fabs_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_FNEG(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_fneg_h,
        gen_helper_sve_fneg_s,
        gen_helper_sve_fneg_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

/* Sign/zero extensions: only element sizes wider than the source
 * width are allocated, hence the leading NULL entries.
 */
static bool trans_SXTB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_sxtb_h,
        gen_helper_sve_sxtb_s,
        gen_helper_sve_sxtb_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_UXTB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_uxtb_h,
        gen_helper_sve_uxtb_s,
        gen_helper_sve_uxtb_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_SXTH(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL, NULL,
        gen_helper_sve_sxth_s,
        gen_helper_sve_sxth_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_UXTH(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL, NULL,
        gen_helper_sve_uxth_s,
        gen_helper_sve_uxth_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

/* SXTW/UXTW only have the doubleword form. */
static bool trans_SXTW(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_sxtw_d : NULL);
}

static bool trans_UXTW(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_uxtw_d : NULL);
}

#undef DO_ZPZ
/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);

/* Reduce Zn under predicate Pg into the scalar D register Zd[0];
 * a NULL fn marks an unallocated element-size encoding.
 */
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(desc);

    /* write_fp_dreg zeroes the high bits of the destination. */
    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
    return true;
}
597
/* Emit a trans function for reduction NAME, dispatching on element size. */
#define DO_VPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)           \
{                                                                   \
    static gen_helper_gvec_reduc * const fns[4] = {                 \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,       \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,       \
    };                                                              \
    return do_vpz_ool(s, a, fns[a->esz]);                           \
}

DO_VPZ(ORV, orv)
DO_VPZ(ANDV, andv)
DO_VPZ(EORV, eorv)

DO_VPZ(UADDV, uaddv)
DO_VPZ(SMAXV, smaxv)
DO_VPZ(UMAXV, umaxv)
DO_VPZ(SMINV, sminv)
DO_VPZ(UMINV, uminv)

/* SADDV has no doubleword form (the result register is only 64 bits). */
static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_reduc * const fns[4] = {
        gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
        gen_helper_sve_saddv_s, NULL
    };
    return do_vpz_ool(s, a, fns[a->esz]);
}

#undef DO_VPZ
ccd841c3
RH
629/*
630 *** SVE Shift by Immediate - Predicated Group
631 */
632
60245996
RH
633/*
634 * Copy Zn into Zd, storing zeros into inactive elements.
635 * If invert, store zeros into the active elements.
ccd841c3 636 */
60245996
RH
637static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
638 int esz, bool invert)
ccd841c3 639{
60245996
RH
640 static gen_helper_gvec_3 * const fns[4] = {
641 gen_helper_sve_movz_b, gen_helper_sve_movz_h,
642 gen_helper_sve_movz_s, gen_helper_sve_movz_d,
ccd841c3 643 };
60245996 644
ccd841c3 645 if (sve_access_check(s)) {
96a461f7 646 gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
ccd841c3
RH
647 }
648 return true;
649}
650
651static bool do_zpzi_ool(DisasContext *s, arg_rpri_esz *a,
652 gen_helper_gvec_3 *fn)
653{
654 if (sve_access_check(s)) {
96a461f7 655 gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
ccd841c3
RH
656 }
657 return true;
658}
659
/* ASR (immediate, predicated). */
static bool trans_ASR_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
        gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
    };
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    /* Shift by element size is architecturally valid.  For
       arithmetic right-shift, it's the same as by one less. */
    a->imm = MIN(a->imm, (8 << a->esz) - 1);
    return do_zpzi_ool(s, a, fns[a->esz]);
}

/* LSR (immediate, predicated). */
static bool trans_LSR_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
        gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
    };
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    /* Shift by element size is architecturally valid.
       For logical shifts, it is a zeroing operation. */
    if (a->imm >= (8 << a->esz)) {
        return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
    } else {
        return do_zpzi_ool(s, a, fns[a->esz]);
    }
}

/* LSL (immediate, predicated). */
static bool trans_LSL_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
        gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
    };
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    /* Shift by element size is architecturally valid.
       For logical shifts, it is a zeroing operation. */
    if (a->imm >= (8 << a->esz)) {
        return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
    } else {
        return do_zpzi_ool(s, a, fns[a->esz]);
    }
}

/* ASRD: arithmetic shift right for divide (rounds toward zero). */
static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
        gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
    };
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    /* Shift by element size is architecturally valid.  For arithmetic
       right shift for division, it is a zeroing operation. */
    if (a->imm >= (8 << a->esz)) {
        return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
    } else {
        return do_zpzi_ool(s, a, fns[a->esz]);
    }
}
729
/*
 *** SVE Bitwise Shift - Predicated Group
 */

/* Shift by wide (doubleword) elements: only b/h/s element sizes exist,
 * hence the 3-entry table and the esz >= 3 rejection.
 */
#define DO_ZPZW(NAME, name) \
static bool trans_##NAME##_zpzw(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_4 * const fns[3] = {                           \
        gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h,   \
        gen_helper_sve_##name##_zpzw_s,                                   \
    };                                                                    \
    if (a->esz < 0 || a->esz >= 3) {                                      \
        return false;                                                     \
    }                                                                     \
    return do_zpzz_ool(s, a, fns[a->esz]);                                \
}

DO_ZPZW(ASR, asr)
DO_ZPZW(LSR, lsr)
DO_ZPZW(LSL, lsl)

#undef DO_ZPZW
/*
 *** SVE Bitwise Shift - Unpredicated Group
 */

/* Expand an unpredicated shift-by-immediate.
 * @asr: true for arithmetic right shift (saturate count at esize-1);
 *       false for logical shifts (count >= esize zeroes the register).
 * @gvec_fn: tcg_gen_gvec_{sari,shri,shli}.
 */
static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
                         void (*gvec_fn)(unsigned, uint32_t, uint32_t,
                                         int64_t, uint32_t, uint32_t))
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        /* Shift by element size is architecturally valid.  For
           arithmetic right-shift, it's the same as by one less.
           Otherwise it is a zeroing operation. */
        if (a->imm >= 8 << a->esz) {
            if (asr) {
                a->imm = (8 << a->esz) - 1;
            } else {
                do_dupi_z(s, a->rd, 0);
                return true;
            }
        }
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}
783
/* ASR/LSR/LSL (immediate, unpredicated). */
static bool trans_ASR_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_shift_imm(s, a, true, tcg_gen_gvec_sari);
}

static bool trans_LSR_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_shift_imm(s, a, false, tcg_gen_gvec_shri);
}

static bool trans_LSL_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_shift_imm(s, a, false, tcg_gen_gvec_shli);
}
798
799static bool do_zzw_ool(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn)
800{
801 if (fn == NULL) {
802 return false;
803 }
804 if (sve_access_check(s)) {
e645d1a1 805 gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
d9d78dcc
RH
806 }
807 return true;
808}
809
/* Unpredicated shift by wide elements: no doubleword form (NULL entry). */
#define DO_ZZW(NAME, name) \
static bool trans_##NAME##_zzw(DisasContext *s, arg_rrr_esz *a)           \
{                                                                         \
    static gen_helper_gvec_3 * const fns[4] = {                           \
        gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h,     \
        gen_helper_sve_##name##_zzw_s, NULL                               \
    };                                                                    \
    return do_zzw_ool(s, a, fns[a->esz]);                                 \
}

DO_ZZW(ASR, asr)
DO_ZZW(LSR, lsr)
DO_ZZW(LSL, lsl)

#undef DO_ZZW
825
96a36e4a
RH
826/*
827 *** SVE Integer Multiply-Add Group
828 */
829
830static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
831 gen_helper_gvec_5 *fn)
832{
833 if (sve_access_check(s)) {
834 unsigned vsz = vec_full_reg_size(s);
835 tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
836 vec_full_reg_offset(s, a->ra),
837 vec_full_reg_offset(s, a->rn),
838 vec_full_reg_offset(s, a->rm),
839 pred_full_reg_offset(s, a->pg),
840 vsz, vsz, 0, fn);
841 }
842 return true;
843}
844
/* Emit a trans function for a predicated multiply-add op NAME. */
#define DO_ZPZZZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a)         \
{                                                                   \
    static gen_helper_gvec_5 * const fns[4] = {                     \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,       \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,       \
    };                                                              \
    return do_zpzzz_ool(s, a, fns[a->esz]);                         \
}

DO_ZPZZZ(MLA, mla)
DO_ZPZZZ(MLS, mls)

#undef DO_ZPZZZ
859
/*
 *** SVE Index Generation Group
 */

/* Fill Zd with start, start+incr, start+2*incr, ...
 * The doubleword helper takes 64-bit operands directly; the narrower
 * element sizes only need the low 32 bits of start/incr.
 */
static void do_index(DisasContext *s, int esz, int rd,
                     TCGv_i64 start, TCGv_i64 incr)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    if (esz == 3) {
        gen_helper_sve_index_d(t_zd, start, incr, desc);
    } else {
        typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
        static index_fn * const fns[3] = {
            gen_helper_sve_index_b,
            gen_helper_sve_index_h,
            gen_helper_sve_index_s,
        };
        TCGv_i32 s32 = tcg_temp_new_i32();
        TCGv_i32 i32 = tcg_temp_new_i32();

        /* Truncate the 64-bit inputs to 32 bits for the narrow helpers. */
        tcg_gen_extrl_i64_i32(s32, start);
        tcg_gen_extrl_i64_i32(i32, incr);
        fns[esz](t_zd, s32, i32, desc);

        tcg_temp_free_i32(s32);
        tcg_temp_free_i32(i32);
    }
    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_i32(desc);
}
894
/* INDEX (immediate, immediate). */
static bool trans_INDEX_ii(DisasContext *s, arg_INDEX_ii *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = tcg_const_i64(a->imm1);
        TCGv_i64 incr = tcg_const_i64(a->imm2);
        do_index(s, a->esz, a->rd, start, incr);
        tcg_temp_free_i64(start);
        tcg_temp_free_i64(incr);
    }
    return true;
}

/* INDEX (immediate, register).  cpu_reg() returns a global TCG value
 * that must not be freed; only the constant temp is released.
 */
static bool trans_INDEX_ir(DisasContext *s, arg_INDEX_ir *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = tcg_const_i64(a->imm);
        TCGv_i64 incr = cpu_reg(s, a->rm);
        do_index(s, a->esz, a->rd, start, incr);
        tcg_temp_free_i64(start);
    }
    return true;
}

/* INDEX (register, immediate). */
static bool trans_INDEX_ri(DisasContext *s, arg_INDEX_ri *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = cpu_reg(s, a->rn);
        TCGv_i64 incr = tcg_const_i64(a->imm);
        do_index(s, a->esz, a->rd, start, incr);
        tcg_temp_free_i64(incr);
    }
    return true;
}

/* INDEX (register, register). */
static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = cpu_reg(s, a->rn);
        TCGv_i64 incr = cpu_reg(s, a->rm);
        do_index(s, a->esz, a->rd, start, incr);
    }
    return true;
}
938
/*
 *** SVE Stack Allocation Group
 */

/* ADDVL: Xd = Xn + imm * VL (vector length in bytes). */
static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
    }
    return true;
}

/* ADDPL: Xd = Xn + imm * PL (predicate length in bytes, VL / 8). */
static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
    }
    return true;
}

/* RDVL: Xd = imm * VL. */
static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
    }
    return true;
}
971
/*
 *** SVE Compute Vector Address Group
 */

/* Common expansion for ADR: the shift amount travels in the simd data. */
static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
{
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
    }
    return true;
}

/* ADR with packed 32-bit offsets. */
static bool trans_ADR_p32(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_p32);
}

/* ADR with packed 64-bit offsets. */
static bool trans_ADR_p64(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_p64);
}

/* ADR with sign-extended 32-bit offsets. */
static bool trans_ADR_s32(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_s32);
}

/* ADR with zero-extended 32-bit offsets. */
static bool trans_ADR_u32(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_u32);
}
1003
/*
 *** SVE Integer Misc - Unpredicated Group
 */

/* FEXPA: no byte form, so esz == 0 is rejected. */
static bool trans_FEXPA(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2 * const fns[4] = {
        NULL,
        gen_helper_sve_fexpa_h,
        gen_helper_sve_fexpa_s,
        gen_helper_sve_fexpa_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0);
    }
    return true;
}

/* FTSSEL: no byte form, so esz == 0 is rejected. */
static bool trans_FTSSEL(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_ftssel_h,
        gen_helper_sve_ftssel_s,
        gen_helper_sve_ftssel_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
    }
    return true;
}
1041
/*
 *** SVE Predicate Logical Operations Group
 */

/* Expand a four-Preg logical operation, optionally (a->s) setting the
 * NZCV flags via PredTest on the result against the governing predicate.
 */
static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (!a->s) {
        /* No flags: just the vector operation. */
        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        return true;
    }

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);

        tcg_temp_free_i64(pd);
        tcg_temp_free_i64(pn);
        tcg_temp_free_i64(pm);
        tcg_temp_free_i64(pg);
    } else {
        /* The operation and flags generation is large.  The computation
         * of the flags depends on the original contents of the guarding
         * predicate.  If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}
1101
1102static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1103{
1104 tcg_gen_and_i64(pd, pn, pm);
1105 tcg_gen_and_i64(pd, pd, pg);
1106}
1107
1108static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1109 TCGv_vec pm, TCGv_vec pg)
1110{
1111 tcg_gen_and_vec(vece, pd, pn, pm);
1112 tcg_gen_and_vec(vece, pd, pd, pg);
1113}
1114
3a7be554 1115static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
516e246a
RH
1116{
1117 static const GVecGen4 op = {
1118 .fni8 = gen_and_pg_i64,
1119 .fniv = gen_and_pg_vec,
1120 .fno = gen_helper_sve_and_pppp,
1121 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1122 };
dd81a8d7
RH
1123
1124 if (!a->s) {
1125 if (!sve_access_check(s)) {
1126 return true;
1127 }
1128 if (a->rn == a->rm) {
1129 if (a->pg == a->rn) {
1130 do_mov_p(s, a->rd, a->rn);
1131 } else {
1132 gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
1133 }
1134 return true;
1135 } else if (a->pg == a->rn || a->pg == a->rm) {
1136 gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
1137 return true;
516e246a 1138 }
516e246a 1139 }
dd81a8d7 1140 return do_pppp_flags(s, a, &op);
516e246a
RH
1141}
1142
1143static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1144{
1145 tcg_gen_andc_i64(pd, pn, pm);
1146 tcg_gen_and_i64(pd, pd, pg);
1147}
1148
1149static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1150 TCGv_vec pm, TCGv_vec pg)
1151{
1152 tcg_gen_andc_vec(vece, pd, pn, pm);
1153 tcg_gen_and_vec(vece, pd, pd, pg);
1154}
1155
3a7be554 1156static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
516e246a
RH
1157{
1158 static const GVecGen4 op = {
1159 .fni8 = gen_bic_pg_i64,
1160 .fniv = gen_bic_pg_vec,
1161 .fno = gen_helper_sve_bic_pppp,
1162 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1163 };
dd81a8d7
RH
1164
1165 if (!a->s && a->pg == a->rn) {
1166 if (sve_access_check(s)) {
1167 gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
1168 }
1169 return true;
516e246a 1170 }
dd81a8d7 1171 return do_pppp_flags(s, a, &op);
516e246a
RH
1172}
1173
1174static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1175{
1176 tcg_gen_xor_i64(pd, pn, pm);
1177 tcg_gen_and_i64(pd, pd, pg);
1178}
1179
1180static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1181 TCGv_vec pm, TCGv_vec pg)
1182{
1183 tcg_gen_xor_vec(vece, pd, pn, pm);
1184 tcg_gen_and_vec(vece, pd, pd, pg);
1185}
1186
/* EOR (predicates): Pd = (Pn ^ Pm) & Pg, optionally setting flags. */
static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
1197
3a7be554 1198static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
516e246a 1199{
516e246a
RH
1200 if (a->s) {
1201 return false;
516e246a 1202 }
d4bc6232
RH
1203 if (sve_access_check(s)) {
1204 unsigned psz = pred_gvec_reg_size(s);
1205 tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
1206 pred_full_reg_offset(s, a->pg),
1207 pred_full_reg_offset(s, a->rn),
1208 pred_full_reg_offset(s, a->rm), psz, psz);
1209 }
1210 return true;
516e246a
RH
1211}
1212
1213static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1214{
1215 tcg_gen_or_i64(pd, pn, pm);
1216 tcg_gen_and_i64(pd, pd, pg);
1217}
1218
1219static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1220 TCGv_vec pm, TCGv_vec pg)
1221{
1222 tcg_gen_or_vec(vece, pd, pn, pm);
1223 tcg_gen_and_vec(vece, pd, pd, pg);
1224}
1225
3a7be554 1226static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
516e246a
RH
1227{
1228 static const GVecGen4 op = {
1229 .fni8 = gen_orr_pg_i64,
1230 .fniv = gen_orr_pg_vec,
1231 .fno = gen_helper_sve_orr_pppp,
1232 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1233 };
dd81a8d7
RH
1234
1235 if (!a->s && a->pg == a->rn && a->rn == a->rm) {
516e246a 1236 return do_mov_p(s, a->rd, a->rn);
516e246a 1237 }
dd81a8d7 1238 return do_pppp_flags(s, a, &op);
516e246a
RH
1239}
1240
1241static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1242{
1243 tcg_gen_orc_i64(pd, pn, pm);
1244 tcg_gen_and_i64(pd, pd, pg);
1245}
1246
1247static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1248 TCGv_vec pm, TCGv_vec pg)
1249{
1250 tcg_gen_orc_vec(vece, pd, pn, pm);
1251 tcg_gen_and_vec(vece, pd, pd, pg);
1252}
1253
/* ORN (predicates): Pd = (Pn | ~Pm) & Pg, optionally setting flags. */
static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
1264
1265static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1266{
1267 tcg_gen_or_i64(pd, pn, pm);
1268 tcg_gen_andc_i64(pd, pg, pd);
1269}
1270
1271static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1272 TCGv_vec pm, TCGv_vec pg)
1273{
1274 tcg_gen_or_vec(vece, pd, pn, pm);
1275 tcg_gen_andc_vec(vece, pd, pg, pd);
1276}
1277
/* NOR (predicates): Pd = Pg & ~(Pn | Pm), optionally setting flags. */
static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
1288
1289static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1290{
1291 tcg_gen_and_i64(pd, pn, pm);
1292 tcg_gen_andc_i64(pd, pg, pd);
1293}
1294
1295static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1296 TCGv_vec pm, TCGv_vec pg)
1297{
1298 tcg_gen_and_vec(vece, pd, pn, pm);
1299 tcg_gen_andc_vec(vece, pd, pg, pd);
1300}
1301
/* NAND (predicates): Pd = Pg & ~(Pn & Pm), optionally setting flags. */
static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
1312
9e18d7a6
RH
1313/*
1314 *** SVE Predicate Misc Group
1315 */
1316
3a7be554 1317static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
9e18d7a6
RH
1318{
1319 if (sve_access_check(s)) {
1320 int nofs = pred_full_reg_offset(s, a->rn);
1321 int gofs = pred_full_reg_offset(s, a->pg);
1322 int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);
1323
1324 if (words == 1) {
1325 TCGv_i64 pn = tcg_temp_new_i64();
1326 TCGv_i64 pg = tcg_temp_new_i64();
1327
1328 tcg_gen_ld_i64(pn, cpu_env, nofs);
1329 tcg_gen_ld_i64(pg, cpu_env, gofs);
1330 do_predtest1(pn, pg);
1331
1332 tcg_temp_free_i64(pn);
1333 tcg_temp_free_i64(pg);
1334 } else {
1335 do_predtest(s, nofs, gofs, words);
1336 }
1337 }
1338 return true;
1339}
1340
028e2a7b
RH
/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    if (pattern == 0x0) {
        /* POW2 */
        return pow2floor(elements);
    } else if (pattern >= 0x1 && pattern <= 0x8) {
        /* VL1 .. VL8: the pattern encodes the count directly. */
        bound = pattern;
    } else if (pattern >= 0x9 && pattern <= 0xd) {
        /* VL16, VL32, VL64, VL128, VL256: powers of two from 16. */
        bound = 16 << (pattern - 9);
    } else if (pattern == 0x1d) {
        /* MUL4 */
        return elements - elements % 4;
    } else if (pattern == 0x1e) {
        /* MUL3 */
        return elements - elements % 3;
    } else if (pattern == 0x1f) {
        /* ALL */
        return elements;
    } else {
        /* #uimm5 */
        return 0;
    }
    /* Fixed counts apply only when the vector is long enough. */
    return elements >= bound ? bound : 0;
}
1378
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR.  For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0.  For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many.  */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            /* The final partial word keeps only the low setsz%64 bits. */
            lastword &= MAKE_64BIT_MASK(0, setsz % 64);
        }
    }

    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        /* The whole predicate fits in one 64-bit store. */
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        /* Uniform pattern: try a single gvec splat of WORD. */
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
            goto done;
        }
    }

    /* Fall back to explicit 64-bit stores: full words of WORD,
     * then the partial LASTWORD, then zero for the remainder.
     */
    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    tcg_temp_free_i64(t);

    /* PTRUES */
    if (setflag) {
        /* N = !empty, Z = empty, C = !empty (first-active), V = 0. */
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}
1458
/* PTRUE, PTRUES: initialize Pd from the count pattern. */
static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a)
{
    return do_predset(s, a->esz, a->rd, a->pat, a->s);
}
1463
/* SETFFR: set every element of the FFR predicate. */
static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a)
{
    /* Note pat == 31 is #all, to set all elements.  */
    return do_predset(s, 0, FFR_PRED_NUM, 31, false);
}
1469
/* PFALSE: clear every element of Pd. */
static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a)
{
    /* Note pat == 32 is #unimp, to set no elements.  */
    return do_predset(s, 0, a->rd, 32, false);
}
1475
/* RDFFR (predicated): Pd = FFR & Pg, optionally setting flags. */
static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication.  Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };
    return trans_AND_pppp(s, &alt_a);
}
1487
/* RDFFR (unpredicated): copy the FFR into Pd. */
static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a)
{
    return do_mov_p(s, a->rd, FFR_PRED_NUM);
}
1492
/* WRFFR: copy Pn into the FFR. */
static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a)
{
    return do_mov_p(s, FFR_PRED_NUM, a->rn);
}
1497
/*
 * Common expansion for PFIRST and PNEXT: call the out-of-line helper
 * GEN_FN on (Pd, Pg) and copy the NZCV flags it returns.
 */
static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_const_i32(desc);

    /* t carries desc in, and the helper's flag word out. */
    gen_fn(t, t_pd, t_pg, t);
    tcg_temp_free_ptr(t_pd);
    tcg_temp_free_ptr(t_pg);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
    return true;
}
1526
/* PFIRST: set the first active element of Pd governed by Pg. */
static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a)
{
    return do_pfirst_pnext(s, a, gen_helper_sve_pfirst);
}
1531
/* PNEXT: advance Pd to the next active element governed by Pg. */
static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a)
{
    return do_pfirst_pnext(s, a, gen_helper_sve_pnext);
}
1536
24e82e68
RH
1537/*
1538 *** SVE Element Count Group
1539 */
1540
/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;
    TCGv_i64 bound;
    TCGCond cond;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow.  */
    if (u) {
        tcg_gen_ext32u_i64(reg, reg);
    } else {
        tcg_gen_ext32s_i64(reg, reg);
    }
    if (d) {
        /* Subtracting a positive value can only underflow. */
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
        cond = TCG_COND_LT;
    } else {
        /* Adding a positive value can only overflow. */
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
        cond = TCG_COND_GT;
    }
    /* Clamp to the bound if the 64-bit result crossed it. */
    bound = tcg_const_i64(ibound);
    tcg_gen_movcond_i64(cond, reg, reg, bound, bound, reg);
    tcg_temp_free_i64(bound);
}
1570
/* Similarly with 64-bit values.  */
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2;

    if (u) {
        if (d) {
            /* Unsigned subtract: clamp to 0 when reg < val. */
            tcg_gen_sub_i64(t0, reg, val);
            tcg_gen_movi_i64(t1, 0);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t1, t0);
        } else {
            /* Unsigned add: wrap-around means sum < reg; clamp to ~0. */
            tcg_gen_add_i64(t0, reg, val);
            tcg_gen_movi_i64(t1, -1);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t1, t0);
        }
    } else {
        if (d) {
            /* Detect signed overflow for subtraction.  */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_sub_i64(t1, reg, val);
            tcg_gen_xor_i64(reg, reg, t1);
            tcg_gen_and_i64(t0, t0, reg);

            /* Bound the result.  */
            tcg_gen_movi_i64(reg, INT64_MIN);
            t2 = tcg_const_i64(0);
            /* Negative t0 means the subtraction overflowed. */
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
        } else {
            /* Detect signed overflow for addition.  */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_add_i64(reg, reg, val);
            tcg_gen_xor_i64(t1, reg, val);
            tcg_gen_andc_i64(t0, t1, t0);

            /* Bound the result.  */
            tcg_gen_movi_i64(t1, INT64_MAX);
            t2 = tcg_const_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
        }
        tcg_temp_free_i64(t2);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
1617
/* Similarly with a vector and a scalar operand.  */
static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
                              TCGv_i64 val, bool u, bool d)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr dptr, nptr;
    TCGv_i32 t32, desc;
    TCGv_i64 t64;

    dptr = tcg_temp_new_ptr();
    nptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
    desc = tcg_const_i32(simd_desc(vsz, vsz, 0));

    switch (esz) {
    case MO_8:
        /* 8/16-bit helpers take a 32-bit addend, negated for decrement. */
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_16:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_32:
        /* 32-bit helpers take a 64-bit addend, negated for decrement. */
        t64 = tcg_temp_new_i64();
        if (d) {
            tcg_gen_neg_i64(t64, val);
        } else {
            tcg_gen_mov_i64(t64, val);
        }
        if (u) {
            gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
        } else {
            gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
        }
        tcg_temp_free_i64(t64);
        break;

    case MO_64:
        /* 64-bit decrement cannot be expressed by negation for the
         * unsigned case; use the dedicated subtract helper.
         */
        if (u) {
            if (d) {
                gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
            } else {
                gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
            }
        } else if (d) {
            t64 = tcg_temp_new_i64();
            tcg_gen_neg_i64(t64, val);
            gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
            tcg_temp_free_i64(t64);
        } else {
            gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
        }
        break;

    default:
        g_assert_not_reached();
    }

    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(nptr);
    tcg_temp_free_i32(desc);
}
1702
3a7be554 1703static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
24e82e68
RH
1704{
1705 if (sve_access_check(s)) {
1706 unsigned fullsz = vec_full_reg_size(s);
1707 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1708 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
1709 }
1710 return true;
1711}
1712
3a7be554 1713static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
24e82e68
RH
1714{
1715 if (sve_access_check(s)) {
1716 unsigned fullsz = vec_full_reg_size(s);
1717 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1718 int inc = numelem * a->imm * (a->d ? -1 : 1);
1719 TCGv_i64 reg = cpu_reg(s, a->rd);
1720
1721 tcg_gen_addi_i64(reg, reg, inc);
1722 }
1723 return true;
1724}
1725
3a7be554 1726static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
24e82e68
RH
1727{
1728 if (!sve_access_check(s)) {
1729 return true;
1730 }
1731
1732 unsigned fullsz = vec_full_reg_size(s);
1733 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1734 int inc = numelem * a->imm;
1735 TCGv_i64 reg = cpu_reg(s, a->rd);
1736
1737 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
1738 if (inc == 0) {
1739 if (a->u) {
1740 tcg_gen_ext32u_i64(reg, reg);
1741 } else {
1742 tcg_gen_ext32s_i64(reg, reg);
1743 }
1744 } else {
1745 TCGv_i64 t = tcg_const_i64(inc);
1746 do_sat_addsub_32(reg, t, a->u, a->d);
1747 tcg_temp_free_i64(t);
1748 }
1749 return true;
1750}
1751
3a7be554 1752static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
24e82e68
RH
1753{
1754 if (!sve_access_check(s)) {
1755 return true;
1756 }
1757
1758 unsigned fullsz = vec_full_reg_size(s);
1759 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1760 int inc = numelem * a->imm;
1761 TCGv_i64 reg = cpu_reg(s, a->rd);
1762
1763 if (inc != 0) {
1764 TCGv_i64 t = tcg_const_i64(inc);
1765 do_sat_addsub_64(reg, t, a->u, a->d);
1766 tcg_temp_free_i64(t);
1767 }
1768 return true;
1769}
1770
3a7be554 1771static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
24e82e68
RH
1772{
1773 if (a->esz == 0) {
1774 return false;
1775 }
1776
1777 unsigned fullsz = vec_full_reg_size(s);
1778 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1779 int inc = numelem * a->imm;
1780
1781 if (inc != 0) {
1782 if (sve_access_check(s)) {
1783 TCGv_i64 t = tcg_const_i64(a->d ? -inc : inc);
1784 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
1785 vec_full_reg_offset(s, a->rn),
1786 t, fullsz, fullsz);
1787 tcg_temp_free_i64(t);
1788 }
1789 } else {
1790 do_mov_z(s, a->rd, a->rn);
1791 }
1792 return true;
1793}
1794
3a7be554 1795static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
24e82e68
RH
1796{
1797 if (a->esz == 0) {
1798 return false;
1799 }
1800
1801 unsigned fullsz = vec_full_reg_size(s);
1802 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1803 int inc = numelem * a->imm;
1804
1805 if (inc != 0) {
1806 if (sve_access_check(s)) {
1807 TCGv_i64 t = tcg_const_i64(inc);
1808 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, t, a->u, a->d);
1809 tcg_temp_free_i64(t);
1810 }
1811 } else {
1812 do_mov_z(s, a->rd, a->rn);
1813 }
1814 return true;
1815}
1816
e1fa1164
RH
1817/*
1818 *** SVE Bitwise Immediate Group
1819 */
1820
1821static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
1822{
1823 uint64_t imm;
1824 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
1825 extract32(a->dbm, 0, 6),
1826 extract32(a->dbm, 6, 6))) {
1827 return false;
1828 }
1829 if (sve_access_check(s)) {
1830 unsigned vsz = vec_full_reg_size(s);
1831 gvec_fn(MO_64, vec_full_reg_offset(s, a->rd),
1832 vec_full_reg_offset(s, a->rn), imm, vsz, vsz);
1833 }
1834 return true;
1835}
1836
/* AND (vector, bitmask immediate). */
static bool trans_AND_zzi(DisasContext *s, arg_rr_dbm *a)
{
    return do_zz_dbm(s, a, tcg_gen_gvec_andi);
}
1841
/* ORR (vector, bitmask immediate). */
static bool trans_ORR_zzi(DisasContext *s, arg_rr_dbm *a)
{
    return do_zz_dbm(s, a, tcg_gen_gvec_ori);
}
1846
/* EOR (vector, bitmask immediate). */
static bool trans_EOR_zzi(DisasContext *s, arg_rr_dbm *a)
{
    return do_zz_dbm(s, a, tcg_gen_gvec_xori);
}
1851
3a7be554 1852static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
e1fa1164
RH
1853{
1854 uint64_t imm;
1855 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
1856 extract32(a->dbm, 0, 6),
1857 extract32(a->dbm, 6, 6))) {
1858 return false;
1859 }
1860 if (sve_access_check(s)) {
1861 do_dupi_z(s, a->rd, imm);
1862 }
1863 return true;
1864}
1865
f25a2361
RH
1866/*
1867 *** SVE Integer Wide Immediate - Predicated Group
1868 */
1869
/* Implement all merging copies.  This is used for CPY (immediate),
 * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
 * VAL holds the 64-bit value to copy into the active elements of Zd;
 * inactive elements are taken from Zn.
 */
static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
                     TCGv_i64 val)
{
    typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
    static gen_cpy * const fns[4] = {
        gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h,
        gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();
    TCGv_ptr t_zn = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));

    fns[esz](t_zd, t_zn, t_pg, val, desc);

    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(desc);
}
1898
3a7be554 1899static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
f25a2361
RH
1900{
1901 if (a->esz == 0) {
1902 return false;
1903 }
1904 if (sve_access_check(s)) {
1905 /* Decode the VFP immediate. */
1906 uint64_t imm = vfp_expand_imm(a->esz, a->imm);
1907 TCGv_i64 t_imm = tcg_const_i64(imm);
1908 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm);
1909 tcg_temp_free_i64(t_imm);
1910 }
1911 return true;
1912}
1913
3a7be554 1914static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
f25a2361 1915{
3a7be554 1916 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
f25a2361
RH
1917 return false;
1918 }
1919 if (sve_access_check(s)) {
1920 TCGv_i64 t_imm = tcg_const_i64(a->imm);
1921 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm);
1922 tcg_temp_free_i64(t_imm);
1923 }
1924 return true;
1925}
1926
3a7be554 1927static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
f25a2361
RH
1928{
1929 static gen_helper_gvec_2i * const fns[4] = {
1930 gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h,
1931 gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d,
1932 };
1933
3a7be554 1934 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
f25a2361
RH
1935 return false;
1936 }
1937 if (sve_access_check(s)) {
1938 unsigned vsz = vec_full_reg_size(s);
1939 TCGv_i64 t_imm = tcg_const_i64(a->imm);
1940 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
1941 pred_full_reg_offset(s, a->pg),
1942 t_imm, vsz, vsz, 0, fns[a->esz]);
1943 tcg_temp_free_i64(t_imm);
1944 }
1945 return true;
1946}
1947
b94f8f60
RH
1948/*
1949 *** SVE Permute Extract Group
1950 */
1951
/*
 * EXT: concatenate Zn:Zm and extract a vector starting at byte IMM.
 * An out-of-range IMM is clamped to 0, selecting Zn unchanged.
 */
static bool trans_EXT(DisasContext *s, arg_EXT *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = vec_full_reg_size(s);
    unsigned n_ofs = a->imm >= vsz ? 0 : a->imm;
    unsigned n_siz = vsz - n_ofs;
    unsigned d = vec_full_reg_offset(s, a->rd);
    unsigned n = vec_full_reg_offset(s, a->rn);
    unsigned m = vec_full_reg_offset(s, a->rm);

    /* Use host vector move insns if we have appropriate sizes
     * and no unfortunate overlap.
     */
    if (m != d
        && n_ofs == size_for_gvec(n_ofs)
        && n_siz == size_for_gvec(n_siz)
        && (d != n || n_siz <= n_ofs)) {
        /* Copy the tail of Zn, then the head of Zm behind it. */
        tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
        if (n_ofs != 0) {
            tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
        }
    } else {
        /* Fall back to the out-of-line helper. */
        tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
    }
    return true;
}
1981
30562ab7
RH
1982/*
1983 *** SVE Permute - Unpredicated Group
1984 */
1985
3a7be554 1986static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
30562ab7
RH
1987{
1988 if (sve_access_check(s)) {
1989 unsigned vsz = vec_full_reg_size(s);
1990 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
1991 vsz, vsz, cpu_reg_sp(s, a->rn));
1992 }
1993 return true;
1994}
1995
/*
 * DUP (indexed): broadcast one element of Zn to all of Zd.
 * The element size and index are both encoded in a->imm:
 * the lowest set bit selects the size, the bits above it the index.
 */
static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
{
    if ((a->imm & 0x1f) == 0) {
        /* No size bit set in the low five bits: invalid encoding. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        unsigned dofs = vec_full_reg_offset(s, a->rd);
        unsigned esz, index;

        esz = ctz32(a->imm);
        index = a->imm >> (esz + 1);

        if ((index << esz) < vsz) {
            unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
            tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
        } else {
            /*
             * While dup_mem handles 128-bit elements, dup_imm does not.
             * Thankfully element size doesn't matter for splatting zero.
             */
            tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
        }
    }
    return true;
}
2022
/*
 * Common expansion for INSR: shift Zn up one element and insert VAL
 * into element 0 of Zd, via the per-size out-of-line helpers.
 */
static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
{
    typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
    static gen_insr * const fns[4] = {
        gen_helper_sve_insr_b, gen_helper_sve_insr_h,
        gen_helper_sve_insr_s, gen_helper_sve_insr_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();
    TCGv_ptr t_zn = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));

    fns[a->esz](t_zd, t_zn, val, desc);

    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_i32(desc);
}
2044
3a7be554 2045static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
30562ab7
RH
2046{
2047 if (sve_access_check(s)) {
2048 TCGv_i64 t = tcg_temp_new_i64();
2049 tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
2050 do_insr_i64(s, a, t);
2051 tcg_temp_free_i64(t);
2052 }
2053 return true;
2054}
2055
/* INSR (scalar): insert general register Xm into Zd. */
static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
{
    if (sve_access_check(s)) {
        do_insr_i64(s, a, cpu_reg(s, a->rm));
    }
    return true;
}
2063
3a7be554 2064static bool trans_REV_v(DisasContext *s, arg_rr_esz *a)
30562ab7
RH
2065{
2066 static gen_helper_gvec_2 * const fns[4] = {
2067 gen_helper_sve_rev_b, gen_helper_sve_rev_h,
2068 gen_helper_sve_rev_s, gen_helper_sve_rev_d
2069 };
2070
2071 if (sve_access_check(s)) {
40e32e5a 2072 gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0);
30562ab7
RH
2073 }
2074 return true;
2075}
2076
3a7be554 2077static bool trans_TBL(DisasContext *s, arg_rrr_esz *a)
30562ab7
RH
2078{
2079 static gen_helper_gvec_3 * const fns[4] = {
2080 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
2081 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
2082 };
2083
2084 if (sve_access_check(s)) {
e645d1a1 2085 gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
30562ab7
RH
2086 }
2087 return true;
2088}
2089
3a7be554 2090static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
30562ab7
RH
2091{
2092 static gen_helper_gvec_2 * const fns[4][2] = {
2093 { NULL, NULL },
2094 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
2095 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
2096 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
2097 };
2098
2099 if (a->esz == 0) {
2100 return false;
2101 }
2102 if (sve_access_check(s)) {
2103 unsigned vsz = vec_full_reg_size(s);
2104 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
2105 vec_full_reg_offset(s, a->rn)
2106 + (a->h ? vsz / 2 : 0),
2107 vsz, vsz, 0, fns[a->esz][a->u]);
2108 }
2109 return true;
2110}
2111
d731d8cb
RH
2112/*
2113 *** SVE Permute - Predicates Group
2114 */
2115
/*
 * Common expansion for three-operand predicate permutes (ZIP/UZP/TRN):
 * HIGH_ODD is passed to the helper via the descriptor DATA field.
 */
static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
                          gen_helper_gvec_3 *fn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    TCGv_ptr t_d = tcg_temp_new_ptr();
    TCGv_ptr t_n = tcg_temp_new_ptr();
    TCGv_ptr t_m = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    uint32_t desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);

    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
    t_desc = tcg_const_i32(desc);

    fn(t_d, t_n, t_m, t_desc);

    tcg_temp_free_ptr(t_d);
    tcg_temp_free_ptr(t_n);
    tcg_temp_free_ptr(t_m);
    tcg_temp_free_i32(t_desc);
    return true;
}
2148
/*
 * Common expansion for two-operand predicate permutes (REV/PUNPK):
 * HIGH_ODD is passed to the helper via the descriptor DATA field.
 */
static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
                          gen_helper_gvec_2 *fn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);
    TCGv_ptr t_d = tcg_temp_new_ptr();
    TCGv_ptr t_n = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    uint32_t desc = 0;

    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
    t_desc = tcg_const_i32(desc);

    fn(t_d, t_n, t_desc);

    tcg_temp_free_i32(t_desc);
    tcg_temp_free_ptr(t_d);
    tcg_temp_free_ptr(t_n);
    return true;
}
2177
/* ZIP1 (predicates): high_odd = 0 selects the low-half form. */
static bool trans_ZIP1_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 0, gen_helper_sve_zip_p);
}
2182
/* ZIP2 (predicates): high_odd = 1 selects the high-half form. */
static bool trans_ZIP2_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 1, gen_helper_sve_zip_p);
}
2187
3a7be554 2188static bool trans_UZP1_p(DisasContext *s, arg_rrr_esz *a)
d731d8cb
RH
2189{
2190 return do_perm_pred3(s, a, 0, gen_helper_sve_uzp_p);
2191}
2192
3a7be554 2193static bool trans_UZP2_p(DisasContext *s, arg_rrr_esz *a)
d731d8cb
RH
2194{
2195 return do_perm_pred3(s, a, 1, gen_helper_sve_uzp_p);
2196}
2197
3a7be554 2198static bool trans_TRN1_p(DisasContext *s, arg_rrr_esz *a)
d731d8cb
RH
2199{
2200 return do_perm_pred3(s, a, 0, gen_helper_sve_trn_p);
2201}
2202
3a7be554 2203static bool trans_TRN2_p(DisasContext *s, arg_rrr_esz *a)
d731d8cb
RH
2204{
2205 return do_perm_pred3(s, a, 1, gen_helper_sve_trn_p);
2206}
2207
3a7be554 2208static bool trans_REV_p(DisasContext *s, arg_rr_esz *a)
d731d8cb
RH
2209{
2210 return do_perm_pred2(s, a, 0, gen_helper_sve_rev_p);
2211}
2212
3a7be554 2213static bool trans_PUNPKLO(DisasContext *s, arg_PUNPKLO *a)
d731d8cb
RH
2214{
2215 return do_perm_pred2(s, a, 0, gen_helper_sve_punpk_p);
2216}
2217
3a7be554 2218static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a)
d731d8cb
RH
2219{
2220 return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p);
2221}
2222
234b48e9
RH
2223/*
2224 *** SVE Permute - Interleaving Group
2225 */
2226
2227static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high)
2228{
2229 static gen_helper_gvec_3 * const fns[4] = {
2230 gen_helper_sve_zip_b, gen_helper_sve_zip_h,
2231 gen_helper_sve_zip_s, gen_helper_sve_zip_d,
2232 };
2233
2234 if (sve_access_check(s)) {
2235 unsigned vsz = vec_full_reg_size(s);
2236 unsigned high_ofs = high ? vsz / 2 : 0;
2237 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
2238 vec_full_reg_offset(s, a->rn) + high_ofs,
2239 vec_full_reg_offset(s, a->rm) + high_ofs,
2240 vsz, vsz, 0, fns[a->esz]);
2241 }
2242 return true;
2243}
2244
2245static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data,
2246 gen_helper_gvec_3 *fn)
2247{
2248 if (sve_access_check(s)) {
e645d1a1 2249 gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
234b48e9
RH
2250 }
2251 return true;
2252}
2253
/* ZIP1/ZIP2 for vectors; half selection handled in do_zip. */
static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zip(s, a, false);
}

static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zip(s, a, true);
}

static gen_helper_gvec_3 * const uzp_fns[4] = {
    gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
    gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
};

/* UZP1/UZP2: the odd/even selection is passed as a byte offset
 * (0 or one element) in the descriptor data. */
static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]);
}

static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]);
}

static gen_helper_gvec_3 * const trn_fns[4] = {
    gen_helper_sve_trn_b, gen_helper_sve_trn_h,
    gen_helper_sve_trn_s, gen_helper_sve_trn_d,
};

/* TRN1/TRN2: likewise, data is a byte offset selecting even/odd. */
static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]);
}

static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]);
}
2293
/*
 *** SVE Permute Vector - Predicated Group
 */

/* COMPACT: only word and doubleword element sizes exist;
 * NULL entries reject the byte/halfword encodings. */
static bool trans_COMPACT(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}
2305
/* Call the helper that computes the ARM LastActiveElement pseudocode
 * function, scaled by the element size.  This includes the not found
 * indication; e.g. not found for esz=3 is -8.
 */
static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
{
    /* Predicate sizes may be smaller and cannot use simd_desc.  We cannot
     * round up, as we do elsewhere, because we need the exact size.
     */
    TCGv_ptr t_p = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    unsigned desc = 0;

    /* Build a PREDDESC descriptor by hand: exact size plus esz. */
    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);

    tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
    t_desc = tcg_const_i32(desc);

    gen_helper_sve_last_active_element(ret, t_p, t_desc);

    tcg_temp_free_i32(t_desc);
    tcg_temp_free_ptr(t_p);
}
2330
2331/* Increment LAST to the offset of the next element in the vector,
2332 * wrapping around to 0.
2333 */
2334static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
2335{
2336 unsigned vsz = vec_full_reg_size(s);
2337
2338 tcg_gen_addi_i32(last, last, 1 << esz);
2339 if (is_power_of_2(vsz)) {
2340 tcg_gen_andi_i32(last, last, vsz - 1);
2341 } else {
2342 TCGv_i32 max = tcg_const_i32(vsz);
2343 TCGv_i32 zero = tcg_const_i32(0);
2344 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
2345 tcg_temp_free_i32(max);
2346 tcg_temp_free_i32(zero);
2347 }
2348}
2349
/* If LAST < 0, set LAST to the offset of the last element in the vector. */
static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
{
    unsigned vsz = vec_full_reg_size(s);

    if (is_power_of_2(vsz)) {
        /* Masking maps the negative "not found" value into range. */
        tcg_gen_andi_i32(last, last, vsz - 1);
    } else {
        /* Replace a negative index with the offset of the last element. */
        TCGv_i32 max = tcg_const_i32(vsz - (1 << esz));
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
        tcg_temp_free_i32(max);
        tcg_temp_free_i32(zero);
    }
}
2365
2366/* Load an unsigned element of ESZ from BASE+OFS. */
2367static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
2368{
2369 TCGv_i64 r = tcg_temp_new_i64();
2370
2371 switch (esz) {
2372 case 0:
2373 tcg_gen_ld8u_i64(r, base, ofs);
2374 break;
2375 case 1:
2376 tcg_gen_ld16u_i64(r, base, ofs);
2377 break;
2378 case 2:
2379 tcg_gen_ld32u_i64(r, base, ofs);
2380 break;
2381 case 3:
2382 tcg_gen_ld_i64(r, base, ofs);
2383 break;
2384 default:
2385 g_assert_not_reached();
2386 }
2387 return r;
2388}
2389
/* Load an unsigned element of ESZ from RM[LAST].
 * LAST is consumed (and possibly modified for endianness) here.
 */
static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
                                 int rm, int esz)
{
    TCGv_ptr p = tcg_temp_new_ptr();
    TCGv_i64 r;

    /* Convert offset into vector into offset into ENV.
     * The final adjustment for the vector register base
     * is added via constant offset to the load.
     */
#ifdef HOST_WORDS_BIGENDIAN
    /* Adjust for element ordering.  See vec_reg_offset. */
    if (esz < 3) {
        tcg_gen_xori_i32(last, last, 8 - (1 << esz));
    }
#endif
    tcg_gen_ext_i32_ptr(p, last);
    tcg_gen_add_ptr(p, p, cpu_env);

    r = load_esz(p, vec_full_reg_offset(s, rm), esz);
    tcg_temp_free_ptr(p);

    return r;
}
2415
/* Compute CLAST for a Zreg: broadcast the selected element of Zm to
 * all of Zd, or leave Zd as a copy of Zn when no element is active.
 */
static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
{
    TCGv_i32 last;
    TCGLabel *over;
    TCGv_i64 ele;
    unsigned vsz, esz = a->esz;

    if (!sve_access_check(s)) {
        return true;
    }

    /* "local" temp: value must survive the brcond below. */
    last = tcg_temp_local_new_i32();
    over = gen_new_label();

    find_last_active(s, last, esz, a->pg);

    /* There is of course no movcond for a 2048-bit vector,
     * so we must branch over the actual store.
     */
    tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);

    if (!before) {
        /* CLASTA: advance past the last active element. */
        incr_last_active(s, last, esz);
    }

    ele = load_last_active(s, last, a->rm, esz);
    tcg_temp_free_i32(last);

    vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
    tcg_temp_free_i64(ele);

    /* If this insn used MOVPRFX, we may need a second move. */
    if (a->rd != a->rn) {
        TCGLabel *done = gen_new_label();
        tcg_gen_br(done);

        /* No active element: Zd gets Zn (the MOVPRFX source). */
        gen_set_label(over);
        do_mov_z(s, a->rd, a->rn);

        gen_set_label(done);
    } else {
        gen_set_label(over);
    }
    return true;
}
2463
/* CLASTA (vectors): conditionally broadcast element after last active. */
static bool trans_CLASTA_z(DisasContext *s, arg_rprr_esz *a)
{
    return do_clast_vector(s, a, false);
}

/* CLASTB (vectors): conditionally broadcast the last active element. */
static bool trans_CLASTB_z(DisasContext *s, arg_rprr_esz *a)
{
    return do_clast_vector(s, a, true);
}
2473
/* Compute CLAST for a scalar.  REG_VAL is updated in place: it receives
 * the selected element of Zm[RM] when an active element exists, and is
 * left unchanged otherwise.
 */
static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
                            bool before, TCGv_i64 reg_val)
{
    TCGv_i32 last = tcg_temp_new_i32();
    TCGv_i64 ele, cmp, zero;

    find_last_active(s, last, esz, pg);

    /* Extend the original value of last prior to incrementing. */
    cmp = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(cmp, last);

    if (!before) {
        incr_last_active(s, last, esz);
    }

    /* The conceit here is that while last < 0 indicates not found, after
     * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
     * from which we can load garbage.  We then discard the garbage with
     * a conditional move.
     */
    ele = load_last_active(s, last, rm, esz);
    tcg_temp_free_i32(last);

    zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, zero, ele, reg_val);

    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(cmp);
    tcg_temp_free_i64(ele);
}
2506
2507/* Compute CLAST for a Vreg. */
2508static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2509{
2510 if (sve_access_check(s)) {
2511 int esz = a->esz;
2512 int ofs = vec_reg_offset(s, a->rd, 0, esz);
2513 TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
2514
2515 do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
2516 write_fp_dreg(s, a->rd, reg);
2517 tcg_temp_free_i64(reg);
2518 }
2519 return true;
2520}
2521
/* CLASTA (SIMD&FP scalar). */
static bool trans_CLASTA_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_fp(s, a, false);
}

/* CLASTB (SIMD&FP scalar). */
static bool trans_CLASTB_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_fp(s, a, true);
}
2531
/* Compute CLAST for a Xreg.  The current value of Xd is first
 * zero-extended to the element size, matching the element that may
 * replace it in do_clast_scalar.
 */
static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
{
    TCGv_i64 reg;

    if (!sve_access_check(s)) {
        return true;
    }

    reg = cpu_reg(s, a->rd);
    switch (a->esz) {
    case 0:
        tcg_gen_ext8u_i64(reg, reg);
        break;
    case 1:
        tcg_gen_ext16u_i64(reg, reg);
        break;
    case 2:
        tcg_gen_ext32u_i64(reg, reg);
        break;
    case 3:
        /* Already full width; no extension required. */
        break;
    default:
        g_assert_not_reached();
    }

    do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
    return true;
}
2561
/* CLASTA (general register). */
static bool trans_CLASTA_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_general(s, a, false);
}

/* CLASTB (general register). */
static bool trans_CLASTB_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_general(s, a, true);
}
2571
2572/* Compute LAST for a scalar. */
2573static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
2574 int pg, int rm, bool before)
2575{
2576 TCGv_i32 last = tcg_temp_new_i32();
2577 TCGv_i64 ret;
2578
2579 find_last_active(s, last, esz, pg);
2580 if (before) {
2581 wrap_last_active(s, last, esz);
2582 } else {
2583 incr_last_active(s, last, esz);
2584 }
2585
2586 ret = load_last_active(s, last, rm, esz);
2587 tcg_temp_free_i32(last);
2588 return ret;
2589}
2590
2591/* Compute LAST for a Vreg. */
2592static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2593{
2594 if (sve_access_check(s)) {
2595 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2596 write_fp_dreg(s, a->rd, val);
2597 tcg_temp_free_i64(val);
2598 }
2599 return true;
2600}
2601
/* LASTA (SIMD&FP scalar). */
static bool trans_LASTA_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_fp(s, a, false);
}

/* LASTB (SIMD&FP scalar). */
static bool trans_LASTB_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_fp(s, a, true);
}
2611
2612/* Compute LAST for a Xreg. */
2613static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
2614{
2615 if (sve_access_check(s)) {
2616 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2617 tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
2618 tcg_temp_free_i64(val);
2619 }
2620 return true;
2621}
2622
/* LASTA (general register). */
static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_general(s, a, false);
}

/* LASTB (general register). */
static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_general(s, a, true);
}
2632
/* CPY (scalar, merging): copy Xn to the active elements of Zd. */
static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
{
    if (sve_access_check(s)) {
        do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
    }
    return true;
}

/* CPY (SIMD&FP scalar, merging): copy Vn's low element likewise. */
static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
{
    if (sve_access_check(s)) {
        int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
        TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
        do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
        tcg_temp_free_i64(t);
    }
    return true;
}
2651
/* REVB: byte reversal within elements; no byte-sized form (NULL). */
static bool trans_REVB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_revb_h,
        gen_helper_sve_revb_s,
        gen_helper_sve_revb_d,
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

/* REVH: halfword reversal; only word/doubleword elements exist. */
static bool trans_REVH(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        NULL,
        gen_helper_sve_revh_s,
        gen_helper_sve_revh_d,
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

/* REVW: word reversal; only valid for doubleword elements. */
static bool trans_REVW(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_revw_d : NULL);
}

/* RBIT: bit reversal within elements, all sizes. */
static bool trans_RBIT(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_rbit_b,
        gen_helper_sve_rbit_h,
        gen_helper_sve_rbit_s,
        gen_helper_sve_rbit_d,
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}
2689
3a7be554 2690static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a)
b48ff240
RH
2691{
2692 if (sve_access_check(s)) {
36cbb7a8 2693 gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
dd701faf 2694 a->rd, a->rn, a->rm, a->pg, a->esz);
b48ff240
RH
2695 }
2696 return true;
2697}
2698
/*
 *** SVE Integer Compare - Vectors Group
 */

/* Expand a two-vector predicated compare that also sets NZCV.
 * Returns false (unallocated) when GEN_FN is NULL.
 */
static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
                          gen_helper_gvec_flags_4 *gen_fn)
{
    TCGv_ptr pd, zn, zm, pg;
    unsigned vsz;
    TCGv_i32 t;

    if (gen_fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    t = tcg_const_i32(simd_desc(vsz, vsz, 0));
    pd = tcg_temp_new_ptr();
    zn = tcg_temp_new_ptr();
    zm = tcg_temp_new_ptr();
    pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));

    /* T is both the descriptor input and the flags result output. */
    gen_fn(t, pd, zn, zm, pg, t);

    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(zn);
    tcg_temp_free_ptr(zm);
    tcg_temp_free_ptr(pg);

    /* Unpack the helper's return value into NZCV. */
    do_pred_flags(t);

    tcg_temp_free_i32(t);
    return true;
}
2741
/* Instantiate a trans function for each vector-vector compare,
 * dispatching on element size.  All four sizes are valid here. */
#define DO_PPZZ(NAME, name) \
static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_flags_4 * const fns[4] = {                     \
        gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h,   \
        gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d,   \
    };                                                                    \
    return do_ppzz_flags(s, a, fns[a->esz]);                              \
}

DO_PPZZ(CMPEQ, cmpeq)
DO_PPZZ(CMPNE, cmpne)
DO_PPZZ(CMPGT, cmpgt)
DO_PPZZ(CMPGE, cmpge)
DO_PPZZ(CMPHI, cmphi)
DO_PPZZ(CMPHS, cmphs)

#undef DO_PPZZ
2760
/* Instantiate trans functions for the wide (64-bit second operand)
 * compares.  No doubleword form exists, hence the NULL entry. */
#define DO_PPZW(NAME, name) \
static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_flags_4 * const fns[4] = {                     \
        gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h,   \
        gen_helper_sve_##name##_ppzw_s, NULL                              \
    };                                                                    \
    return do_ppzz_flags(s, a, fns[a->esz]);                              \
}

DO_PPZW(CMPEQ, cmpeq)
DO_PPZW(CMPNE, cmpne)
DO_PPZW(CMPGT, cmpgt)
DO_PPZW(CMPGE, cmpge)
DO_PPZW(CMPHI, cmphi)
DO_PPZW(CMPHS, cmphs)
DO_PPZW(CMPLT, cmplt)
DO_PPZW(CMPLE, cmple)
DO_PPZW(CMPLO, cmplo)
DO_PPZW(CMPLS, cmpls)

#undef DO_PPZW
2783
/*
 *** SVE Integer Compare - Immediate Groups
 */

/* Expand a vector-vs-immediate predicated compare that sets NZCV.
 * The immediate rides in the simd descriptor's data field.
 */
static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
                          gen_helper_gvec_flags_3 *gen_fn)
{
    TCGv_ptr pd, zn, pg;
    unsigned vsz;
    TCGv_i32 t;

    if (gen_fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    t = tcg_const_i32(simd_desc(vsz, vsz, a->imm));
    pd = tcg_temp_new_ptr();
    zn = tcg_temp_new_ptr();
    pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));

    /* T is both the descriptor input and the flags result output. */
    gen_fn(t, pd, zn, pg, t);

    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(zn);
    tcg_temp_free_ptr(pg);

    do_pred_flags(t);

    tcg_temp_free_i32(t);
    return true;
}
2823
/* Instantiate a trans function for each compare-with-immediate. */
#define DO_PPZI(NAME, name) \
static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a)         \
{                                                                         \
    static gen_helper_gvec_flags_3 * const fns[4] = {                     \
        gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h,   \
        gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d,   \
    };                                                                    \
    return do_ppzi_flags(s, a, fns[a->esz]);                              \
}

DO_PPZI(CMPEQ, cmpeq)
DO_PPZI(CMPNE, cmpne)
DO_PPZI(CMPGT, cmpgt)
DO_PPZI(CMPGE, cmpge)
DO_PPZI(CMPHI, cmphi)
DO_PPZI(CMPHS, cmphs)
DO_PPZI(CMPLT, cmplt)
DO_PPZI(CMPLE, cmple)
DO_PPZI(CMPLO, cmplo)
DO_PPZI(CMPLS, cmpls)

#undef DO_PPZI
2846
/*
 *** SVE Partition Break Group
 */

/* Expand a 3-source predicate break (BRKPA/BRKPB).  When a->s is set,
 * the flag-setting helper variant is used and NZCV is updated.
 */
static bool do_brk3(DisasContext *s, arg_rprr_s *a,
                    gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    /* Predicate sizes may be smaller and cannot use simd_desc. */
    TCGv_ptr d = tcg_temp_new_ptr();
    TCGv_ptr n = tcg_temp_new_ptr();
    TCGv_ptr m = tcg_temp_new_ptr();
    TCGv_ptr g = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));

    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));

    if (a->s) {
        /* T doubles as descriptor input and flags result. */
        fn_s(t, d, n, m, g, t);
        do_pred_flags(t);
    } else {
        fn(d, n, m, g, t);
    }
    tcg_temp_free_ptr(d);
    tcg_temp_free_ptr(n);
    tcg_temp_free_ptr(m);
    tcg_temp_free_ptr(g);
    tcg_temp_free_i32(t);
    return true;
}
2885
/* Expand a 2-source predicate break (BRKA/BRKB/BRKN); as do_brk3
 * but without the second predicate operand.
 */
static bool do_brk2(DisasContext *s, arg_rpr_s *a,
                    gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    /* Predicate sizes may be smaller and cannot use simd_desc. */
    TCGv_ptr d = tcg_temp_new_ptr();
    TCGv_ptr n = tcg_temp_new_ptr();
    TCGv_ptr g = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));

    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));

    if (a->s) {
        /* T doubles as descriptor input and flags result. */
        fn_s(t, d, n, g, t);
        do_pred_flags(t);
    } else {
        fn(d, n, g, t);
    }
    tcg_temp_free_ptr(d);
    tcg_temp_free_ptr(n);
    tcg_temp_free_ptr(g);
    tcg_temp_free_i32(t);
    return true;
}
2917
/* BRKPA/BRKPB: propagate break across predicate pair. */
static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a)
{
    return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas);
}

static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a)
{
    return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs);
}

/* BRKA/BRKB, merging form. */
static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m);
}

static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m);
}

/* BRKA/BRKB, zeroing form. */
static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z);
}

static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z);
}

/* BRKN: propagate break to next partition. */
static bool trans_BRKN(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns);
}
2952
/*
 *** SVE Predicate Count Group
 */

/* Count the active elements of PN within PG (of element size ESZ)
 * into VAL.  Small predicates are counted inline with ctpop; larger
 * ones go through the out-of-line helper.
 */
static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
{
    unsigned psz = pred_full_reg_size(s);

    if (psz <= 8) {
        uint64_t psz_mask;

        tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
        if (pn != pg) {
            TCGv_i64 g = tcg_temp_new_i64();
            tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
            tcg_gen_and_i64(val, val, g);
            tcg_temp_free_i64(g);
        }

        /* Reduce the pred_esz_masks value simply to reduce the
         * size of the code generated here.
         */
        psz_mask = MAKE_64BIT_MASK(0, psz * 8);
        tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);

        tcg_gen_ctpop_i64(val, val);
    } else {
        TCGv_ptr t_pn = tcg_temp_new_ptr();
        TCGv_ptr t_pg = tcg_temp_new_ptr();
        unsigned desc = 0;
        TCGv_i32 t_desc;

        /* Hand-built PREDDESC descriptor: exact predicate size + esz. */
        desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
        desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);

        tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
        tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
        t_desc = tcg_const_i32(desc);

        gen_helper_sve_cntp(val, t_pn, t_pg, t_desc);
        tcg_temp_free_ptr(t_pn);
        tcg_temp_free_ptr(t_pg);
        tcg_temp_free_i32(t_desc);
    }
}
2998
3a7be554 2999static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
9ee3a611
RH
3000{
3001 if (sve_access_check(s)) {
3002 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
3003 }
3004 return true;
3005}
3006
3a7be554 3007static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
9ee3a611
RH
3008{
3009 if (sve_access_check(s)) {
3010 TCGv_i64 reg = cpu_reg(s, a->rd);
3011 TCGv_i64 val = tcg_temp_new_i64();
3012
3013 do_cntp(s, val, a->esz, a->pg, a->pg);
3014 if (a->d) {
3015 tcg_gen_sub_i64(reg, reg, val);
3016 } else {
3017 tcg_gen_add_i64(reg, reg, val);
3018 }
3019 tcg_temp_free_i64(val);
3020 }
3021 return true;
3022}
3023
/* INCP/DECP (vector): add or subtract the active-element count to
 * each element of Zd; no byte-element form exists. */
static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_i64 val = tcg_temp_new_i64();
        GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;

        do_cntp(s, val, a->esz, a->pg, a->pg);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), val, vsz, vsz);
        /* NOTE(review): val appears to be owned by gvec_fn here;
         * no explicit free, matching other callers of gvec_*s. */
    }
    return true;
}
3040
/* SQINCP/UQINCP/SQDECP/UQDECP (32-bit scalar): saturating
 * add/subtract of the active-element count. */
static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_32(reg, val, a->u, a->d);
    }
    return true;
}

/* Same as above for the 64-bit scalar forms. */
static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_64(reg, val, a->u, a->d);
    }
    return true;
}
3064
/* SQINCP/UQINCP/SQDECP/UQDECP (vector): saturating add/subtract of
 * the active-element count per element; no byte-element form. */
static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 val = tcg_temp_new_i64();
        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
    }
    return true;
}
3077
/*
 *** SVE Integer Compare Scalars Group
 */

/* CTERMEQ/CTERMNE: compare two scalars and set N = comparison result,
 * V = !N && !C; Z and C are left unchanged.
 */
static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
    TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
    TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
    TCGv_i64 cmp = tcg_temp_new_i64();

    tcg_gen_setcond_i64(cond, cmp, rn, rm);
    tcg_gen_extrl_i64_i32(cpu_NF, cmp);
    tcg_temp_free_i64(cmp);

    /* VF = !NF & !CF. */
    tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);

    /* Both NF and VF actually look at bit 31. */
    tcg_gen_neg_i32(cpu_NF, cpu_NF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);
    return true;
}
3106
/* WHILE{LT,LE,LO,LS} (SVE) and WHILE{GT,GE,HI,HS} (SVE2): construct a
 * predicate from a scalar loop-bound comparison.  The different
 * conditions are compressed into a single iteration count which the
 * helper turns into a predicate.
 */
static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
{
    TCGv_i64 op0, op1, t0, t1, tmax;
    TCGv_i32 t2, t3;
    TCGv_ptr ptr;
    unsigned vsz = vec_full_reg_size(s);
    unsigned desc = 0;
    TCGCond cond;
    uint64_t maxval;
    /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
    bool eq = a->eq == a->lt;

    /* The greater-than conditions are all SVE2. */
    if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    op0 = read_cpu_reg(s, a->rn, 1);
    op1 = read_cpu_reg(s, a->rm, 1);

    if (!a->sf) {
        /* 32-bit form: extend the operands per signedness. */
        if (a->u) {
            tcg_gen_ext32u_i64(op0, op0);
            tcg_gen_ext32u_i64(op1, op1);
        } else {
            tcg_gen_ext32s_i64(op0, op0);
            tcg_gen_ext32s_i64(op1, op1);
        }
    }

    /* For the helper, compress the different conditions into a computation
     * of how many iterations for which the condition is true.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    if (a->lt) {
        tcg_gen_sub_i64(t0, op1, op0);
        if (a->u) {
            maxval = a->sf ? UINT64_MAX : UINT32_MAX;
            cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
        } else {
            maxval = a->sf ? INT64_MAX : INT32_MAX;
            cond = eq ? TCG_COND_LE : TCG_COND_LT;
        }
    } else {
        tcg_gen_sub_i64(t0, op0, op1);
        if (a->u) {
            maxval = 0;
            cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
        } else {
            maxval = a->sf ? INT64_MIN : INT32_MIN;
            cond = eq ? TCG_COND_GE : TCG_COND_GT;
        }
    }

    tmax = tcg_const_i64(vsz >> a->esz);
    if (eq) {
        /* Equality means one more iteration. */
        tcg_gen_addi_i64(t0, t0, 1);

        /*
         * For the less-than while, if op1 is maxval (and the only time
         * the addition above could overflow), then we produce an all-true
         * predicate by setting the count to the vector length.  This is
         * because the pseudocode is described as an increment + compare
         * loop, and the maximum integer would always compare true.
         * Similarly, the greater-than while has the same issue with the
         * minimum integer due to the decrement + compare loop.
         */
        tcg_gen_movi_i64(t1, maxval);
        tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
    }

    /* Bound to the maximum. */
    tcg_gen_umin_i64(t0, t0, tmax);
    tcg_temp_free_i64(tmax);

    /* Set the count to zero if the condition is false. */
    tcg_gen_movi_i64(t1, 0);
    tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
    tcg_temp_free_i64(t1);

    /* Since we're bounded, pass as a 32-bit type. */
    t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, t0);
    tcg_temp_free_i64(t0);

    /* Scale elements to bits. */
    tcg_gen_shli_i32(t2, t2, a->esz);

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    t3 = tcg_const_i32(desc);

    ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));

    if (a->lt) {
        gen_helper_sve_whilel(t2, ptr, t2, t3);
    } else {
        gen_helper_sve_whileg(t2, ptr, t2, t3);
    }
    do_pred_flags(t2);

    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
    return true;
}
3220
14f6dad1
RH
3221static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
3222{
3223 TCGv_i64 op0, op1, diff, t1, tmax;
3224 TCGv_i32 t2, t3;
3225 TCGv_ptr ptr;
3226 unsigned vsz = vec_full_reg_size(s);
3227 unsigned desc = 0;
3228
3229 if (!dc_isar_feature(aa64_sve2, s)) {
3230 return false;
3231 }
3232 if (!sve_access_check(s)) {
3233 return true;
3234 }
3235
3236 op0 = read_cpu_reg(s, a->rn, 1);
3237 op1 = read_cpu_reg(s, a->rm, 1);
3238
3239 tmax = tcg_const_i64(vsz);
3240 diff = tcg_temp_new_i64();
3241
3242 if (a->rw) {
3243 /* WHILERW */
3244 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
3245 t1 = tcg_temp_new_i64();
3246 tcg_gen_sub_i64(diff, op0, op1);
3247 tcg_gen_sub_i64(t1, op1, op0);
3248 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
3249 tcg_temp_free_i64(t1);
3250 /* Round down to a multiple of ESIZE. */
3251 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3252 /* If op1 == op0, diff == 0, and the condition is always true. */
3253 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
3254 } else {
3255 /* WHILEWR */
3256 tcg_gen_sub_i64(diff, op1, op0);
3257 /* Round down to a multiple of ESIZE. */
3258 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3259 /* If op0 >= op1, diff <= 0, the condition is always true. */
3260 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
3261 }
3262
3263 /* Bound to the maximum. */
3264 tcg_gen_umin_i64(diff, diff, tmax);
3265 tcg_temp_free_i64(tmax);
3266
3267 /* Since we're bounded, pass as a 32-bit type. */
3268 t2 = tcg_temp_new_i32();
3269 tcg_gen_extrl_i64_i32(t2, diff);
3270 tcg_temp_free_i64(diff);
3271
3272 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3273 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3274 t3 = tcg_const_i32(desc);
3275
3276 ptr = tcg_temp_new_ptr();
3277 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3278
3279 gen_helper_sve_whilel(t2, ptr, t2, t3);
3280 do_pred_flags(t2);
3281
3282 tcg_temp_free_ptr(ptr);
3283 tcg_temp_free_i32(t2);
3284 tcg_temp_free_i32(t3);
3285 return true;
3286}
3287
ed491961
RH
/*
 *** SVE Integer Wide Immediate - Unpredicated Group
 */
3291
3a7be554 3292static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
ed491961
RH
3293{
3294 if (a->esz == 0) {
3295 return false;
3296 }
3297 if (sve_access_check(s)) {
3298 unsigned vsz = vec_full_reg_size(s);
3299 int dofs = vec_full_reg_offset(s, a->rd);
3300 uint64_t imm;
3301
3302 /* Decode the VFP immediate. */
3303 imm = vfp_expand_imm(a->esz, a->imm);
8711e71f 3304 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
ed491961
RH
3305 }
3306 return true;
3307}
3308
3a7be554 3309static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
ed491961 3310{
3a7be554 3311 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
ed491961
RH
3312 return false;
3313 }
3314 if (sve_access_check(s)) {
3315 unsigned vsz = vec_full_reg_size(s);
3316 int dofs = vec_full_reg_offset(s, a->rd);
3317
8711e71f 3318 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
ed491961
RH
3319 }
3320 return true;
3321}
3322
3a7be554 3323static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d 3324{
3a7be554 3325 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
6e6a157d
RH
3326 return false;
3327 }
3328 if (sve_access_check(s)) {
3329 unsigned vsz = vec_full_reg_size(s);
3330 tcg_gen_gvec_addi(a->esz, vec_full_reg_offset(s, a->rd),
3331 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
3332 }
3333 return true;
3334}
3335
3a7be554 3336static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d
RH
3337{
3338 a->imm = -a->imm;
3a7be554 3339 return trans_ADD_zzi(s, a);
6e6a157d
RH
3340}
3341
3a7be554 3342static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d 3343{
53229a77 3344 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
6e6a157d
RH
3345 static const GVecGen2s op[4] = {
3346 { .fni8 = tcg_gen_vec_sub8_i64,
3347 .fniv = tcg_gen_sub_vec,
3348 .fno = gen_helper_sve_subri_b,
53229a77 3349 .opt_opc = vecop_list,
6e6a157d
RH
3350 .vece = MO_8,
3351 .scalar_first = true },
3352 { .fni8 = tcg_gen_vec_sub16_i64,
3353 .fniv = tcg_gen_sub_vec,
3354 .fno = gen_helper_sve_subri_h,
53229a77 3355 .opt_opc = vecop_list,
6e6a157d
RH
3356 .vece = MO_16,
3357 .scalar_first = true },
3358 { .fni4 = tcg_gen_sub_i32,
3359 .fniv = tcg_gen_sub_vec,
3360 .fno = gen_helper_sve_subri_s,
53229a77 3361 .opt_opc = vecop_list,
6e6a157d
RH
3362 .vece = MO_32,
3363 .scalar_first = true },
3364 { .fni8 = tcg_gen_sub_i64,
3365 .fniv = tcg_gen_sub_vec,
3366 .fno = gen_helper_sve_subri_d,
53229a77 3367 .opt_opc = vecop_list,
6e6a157d
RH
3368 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3369 .vece = MO_64,
3370 .scalar_first = true }
3371 };
3372
3a7be554 3373 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
6e6a157d
RH
3374 return false;
3375 }
3376 if (sve_access_check(s)) {
3377 unsigned vsz = vec_full_reg_size(s);
3378 TCGv_i64 c = tcg_const_i64(a->imm);
3379 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
3380 vec_full_reg_offset(s, a->rn),
3381 vsz, vsz, c, &op[a->esz]);
3382 tcg_temp_free_i64(c);
3383 }
3384 return true;
3385}
3386
3a7be554 3387static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d
RH
3388{
3389 if (sve_access_check(s)) {
3390 unsigned vsz = vec_full_reg_size(s);
3391 tcg_gen_gvec_muli(a->esz, vec_full_reg_offset(s, a->rd),
3392 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
3393 }
3394 return true;
3395}
3396
3a7be554 3397static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
6e6a157d 3398{
3a7be554 3399 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
6e6a157d
RH
3400 return false;
3401 }
3402 if (sve_access_check(s)) {
3403 TCGv_i64 val = tcg_const_i64(a->imm);
3404 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d);
3405 tcg_temp_free_i64(val);
3406 }
3407 return true;
3408}
3409
3a7be554 3410static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d 3411{
3a7be554 3412 return do_zzi_sat(s, a, false, false);
6e6a157d
RH
3413}
3414
3a7be554 3415static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d 3416{
3a7be554 3417 return do_zzi_sat(s, a, true, false);
6e6a157d
RH
3418}
3419
3a7be554 3420static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d 3421{
3a7be554 3422 return do_zzi_sat(s, a, false, true);
6e6a157d
RH
3423}
3424
3a7be554 3425static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a)
6e6a157d 3426{
3a7be554 3427 return do_zzi_sat(s, a, true, true);
6e6a157d
RH
3428}
3429
3430static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
3431{
3432 if (sve_access_check(s)) {
3433 unsigned vsz = vec_full_reg_size(s);
3434 TCGv_i64 c = tcg_const_i64(a->imm);
3435
3436 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
3437 vec_full_reg_offset(s, a->rn),
3438 c, vsz, vsz, 0, fn);
3439 tcg_temp_free_i64(c);
3440 }
3441 return true;
3442}
3443
3444#define DO_ZZI(NAME, name) \
3a7be554 3445static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a) \
6e6a157d
RH
3446{ \
3447 static gen_helper_gvec_2i * const fns[4] = { \
3448 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \
3449 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \
3450 }; \
3451 return do_zzi_ool(s, a, fns[a->esz]); \
3452}
3453
3454DO_ZZI(SMAX, smax)
3455DO_ZZI(UMAX, umax)
3456DO_ZZI(SMIN, smin)
3457DO_ZZI(UMIN, umin)
3458
3459#undef DO_ZZI
3460
3a7be554 3461static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a)
d730ecaa
RH
3462{
3463 static gen_helper_gvec_3 * const fns[2][2] = {
3464 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
3465 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
3466 };
3467
3468 if (sve_access_check(s)) {
e645d1a1 3469 gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, 0);
d730ecaa
RH
3470 }
3471 return true;
3472}
3473
3a7be554 3474static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a)
16fcfdc7
RH
3475{
3476 static gen_helper_gvec_3 * const fns[2][2] = {
3477 { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h },
3478 { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h }
3479 };
3480
3481 if (sve_access_check(s)) {
e645d1a1 3482 gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->index);
16fcfdc7
RH
3483 }
3484 return true;
3485}
3486
3487
ca40a6e6
RH
3488/*
3489 *** SVE Floating Point Multiply-Add Indexed Group
3490 */
3491
3a7be554 3492static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
ca40a6e6
RH
3493{
3494 static gen_helper_gvec_4_ptr * const fns[3] = {
3495 gen_helper_gvec_fmla_idx_h,
3496 gen_helper_gvec_fmla_idx_s,
3497 gen_helper_gvec_fmla_idx_d,
3498 };
3499
3500 if (sve_access_check(s)) {
3501 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3502 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
ca40a6e6
RH
3503 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3504 vec_full_reg_offset(s, a->rn),
3505 vec_full_reg_offset(s, a->rm),
3506 vec_full_reg_offset(s, a->ra),
3507 status, vsz, vsz, (a->index << 1) | a->sub,
3508 fns[a->esz - 1]);
3509 tcg_temp_free_ptr(status);
3510 }
3511 return true;
3512}
3513
3514/*
3515 *** SVE Floating Point Multiply Indexed Group
3516 */
3517
3a7be554 3518static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a)
ca40a6e6
RH
3519{
3520 static gen_helper_gvec_3_ptr * const fns[3] = {
3521 gen_helper_gvec_fmul_idx_h,
3522 gen_helper_gvec_fmul_idx_s,
3523 gen_helper_gvec_fmul_idx_d,
3524 };
3525
3526 if (sve_access_check(s)) {
3527 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3528 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
ca40a6e6
RH
3529 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
3530 vec_full_reg_offset(s, a->rn),
3531 vec_full_reg_offset(s, a->rm),
3532 status, vsz, vsz, a->index, fns[a->esz - 1]);
3533 tcg_temp_free_ptr(status);
3534 }
3535 return true;
3536}
3537
23fbe79f
RH
3538/*
3539 *** SVE Floating Point Fast Reduction Group
3540 */
3541
3542typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
3543 TCGv_ptr, TCGv_i32);
3544
3545static void do_reduce(DisasContext *s, arg_rpr_esz *a,
3546 gen_helper_fp_reduce *fn)
3547{
3548 unsigned vsz = vec_full_reg_size(s);
3549 unsigned p2vsz = pow2ceil(vsz);
c648c9b7 3550 TCGv_i32 t_desc = tcg_const_i32(simd_desc(vsz, vsz, p2vsz));
23fbe79f
RH
3551 TCGv_ptr t_zn, t_pg, status;
3552 TCGv_i64 temp;
3553
3554 temp = tcg_temp_new_i64();
3555 t_zn = tcg_temp_new_ptr();
3556 t_pg = tcg_temp_new_ptr();
3557
3558 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
3559 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
cdfb22bb 3560 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
23fbe79f
RH
3561
3562 fn(temp, t_zn, t_pg, status, t_desc);
3563 tcg_temp_free_ptr(t_zn);
3564 tcg_temp_free_ptr(t_pg);
3565 tcg_temp_free_ptr(status);
3566 tcg_temp_free_i32(t_desc);
3567
3568 write_fp_dreg(s, a->rd, temp);
3569 tcg_temp_free_i64(temp);
3570}
3571
3572#define DO_VPZ(NAME, name) \
3a7be554 3573static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
23fbe79f
RH
3574{ \
3575 static gen_helper_fp_reduce * const fns[3] = { \
3576 gen_helper_sve_##name##_h, \
3577 gen_helper_sve_##name##_s, \
3578 gen_helper_sve_##name##_d, \
3579 }; \
3580 if (a->esz == 0) { \
3581 return false; \
3582 } \
3583 if (sve_access_check(s)) { \
3584 do_reduce(s, a, fns[a->esz - 1]); \
3585 } \
3586 return true; \
3587}
3588
3589DO_VPZ(FADDV, faddv)
3590DO_VPZ(FMINNMV, fminnmv)
3591DO_VPZ(FMAXNMV, fmaxnmv)
3592DO_VPZ(FMINV, fminv)
3593DO_VPZ(FMAXV, fmaxv)
3594
3887c038
RH
3595/*
3596 *** SVE Floating Point Unary Operations - Unpredicated Group
3597 */
3598
3599static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn)
3600{
3601 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3602 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3887c038
RH
3603
3604 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd),
3605 vec_full_reg_offset(s, a->rn),
3606 status, vsz, vsz, 0, fn);
3607 tcg_temp_free_ptr(status);
3608}
3609
3a7be554 3610static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a)
3887c038
RH
3611{
3612 static gen_helper_gvec_2_ptr * const fns[3] = {
3613 gen_helper_gvec_frecpe_h,
3614 gen_helper_gvec_frecpe_s,
3615 gen_helper_gvec_frecpe_d,
3616 };
3617 if (a->esz == 0) {
3618 return false;
3619 }
3620 if (sve_access_check(s)) {
3621 do_zz_fp(s, a, fns[a->esz - 1]);
3622 }
3623 return true;
3624}
3625
3a7be554 3626static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a)
3887c038
RH
3627{
3628 static gen_helper_gvec_2_ptr * const fns[3] = {
3629 gen_helper_gvec_frsqrte_h,
3630 gen_helper_gvec_frsqrte_s,
3631 gen_helper_gvec_frsqrte_d,
3632 };
3633 if (a->esz == 0) {
3634 return false;
3635 }
3636 if (sve_access_check(s)) {
3637 do_zz_fp(s, a, fns[a->esz - 1]);
3638 }
3639 return true;
3640}
3641
4d2e2a03
RH
3642/*
3643 *** SVE Floating Point Compare with Zero Group
3644 */
3645
3646static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
3647 gen_helper_gvec_3_ptr *fn)
3648{
3649 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3650 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4d2e2a03
RH
3651
3652 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
3653 vec_full_reg_offset(s, a->rn),
3654 pred_full_reg_offset(s, a->pg),
3655 status, vsz, vsz, 0, fn);
3656 tcg_temp_free_ptr(status);
3657}
3658
3659#define DO_PPZ(NAME, name) \
3a7be554 3660static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
4d2e2a03
RH
3661{ \
3662 static gen_helper_gvec_3_ptr * const fns[3] = { \
3663 gen_helper_sve_##name##_h, \
3664 gen_helper_sve_##name##_s, \
3665 gen_helper_sve_##name##_d, \
3666 }; \
3667 if (a->esz == 0) { \
3668 return false; \
3669 } \
3670 if (sve_access_check(s)) { \
3671 do_ppz_fp(s, a, fns[a->esz - 1]); \
3672 } \
3673 return true; \
3674}
3675
3676DO_PPZ(FCMGE_ppz0, fcmge0)
3677DO_PPZ(FCMGT_ppz0, fcmgt0)
3678DO_PPZ(FCMLE_ppz0, fcmle0)
3679DO_PPZ(FCMLT_ppz0, fcmlt0)
3680DO_PPZ(FCMEQ_ppz0, fcmeq0)
3681DO_PPZ(FCMNE_ppz0, fcmne0)
3682
3683#undef DO_PPZ
3684
67fcd9ad
RH
3685/*
3686 *** SVE floating-point trig multiply-add coefficient
3687 */
3688
3a7be554 3689static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a)
67fcd9ad
RH
3690{
3691 static gen_helper_gvec_3_ptr * const fns[3] = {
3692 gen_helper_sve_ftmad_h,
3693 gen_helper_sve_ftmad_s,
3694 gen_helper_sve_ftmad_d,
3695 };
3696
3697 if (a->esz == 0) {
3698 return false;
3699 }
3700 if (sve_access_check(s)) {
3701 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3702 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
67fcd9ad
RH
3703 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
3704 vec_full_reg_offset(s, a->rn),
3705 vec_full_reg_offset(s, a->rm),
3706 status, vsz, vsz, a->imm, fns[a->esz - 1]);
3707 tcg_temp_free_ptr(status);
3708 }
3709 return true;
3710}
3711
7f9ddf64
RH
3712/*
3713 *** SVE Floating Point Accumulating Reduction Group
3714 */
3715
3a7be554 3716static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
7f9ddf64
RH
3717{
3718 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
3719 TCGv_ptr, TCGv_ptr, TCGv_i32);
3720 static fadda_fn * const fns[3] = {
3721 gen_helper_sve_fadda_h,
3722 gen_helper_sve_fadda_s,
3723 gen_helper_sve_fadda_d,
3724 };
3725 unsigned vsz = vec_full_reg_size(s);
3726 TCGv_ptr t_rm, t_pg, t_fpst;
3727 TCGv_i64 t_val;
3728 TCGv_i32 t_desc;
3729
3730 if (a->esz == 0) {
3731 return false;
3732 }
3733 if (!sve_access_check(s)) {
3734 return true;
3735 }
3736
3737 t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
3738 t_rm = tcg_temp_new_ptr();
3739 t_pg = tcg_temp_new_ptr();
3740 tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
3741 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
cdfb22bb 3742 t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7f9ddf64
RH
3743 t_desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
3744
3745 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);
3746
3747 tcg_temp_free_i32(t_desc);
3748 tcg_temp_free_ptr(t_fpst);
3749 tcg_temp_free_ptr(t_pg);
3750 tcg_temp_free_ptr(t_rm);
3751
3752 write_fp_dreg(s, a->rd, t_val);
3753 tcg_temp_free_i64(t_val);
3754 return true;
3755}
3756
29b80469
RH
3757/*
3758 *** SVE Floating Point Arithmetic - Unpredicated Group
3759 */
3760
3761static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a,
3762 gen_helper_gvec_3_ptr *fn)
3763{
3764 if (fn == NULL) {
3765 return false;
3766 }
3767 if (sve_access_check(s)) {
3768 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3769 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
29b80469
RH
3770 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
3771 vec_full_reg_offset(s, a->rn),
3772 vec_full_reg_offset(s, a->rm),
3773 status, vsz, vsz, 0, fn);
3774 tcg_temp_free_ptr(status);
3775 }
3776 return true;
3777}
3778
3779
3780#define DO_FP3(NAME, name) \
3a7be554 3781static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
29b80469
RH
3782{ \
3783 static gen_helper_gvec_3_ptr * const fns[4] = { \
3784 NULL, gen_helper_gvec_##name##_h, \
3785 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \
3786 }; \
3787 return do_zzz_fp(s, a, fns[a->esz]); \
3788}
3789
3790DO_FP3(FADD_zzz, fadd)
3791DO_FP3(FSUB_zzz, fsub)
3792DO_FP3(FMUL_zzz, fmul)
3793DO_FP3(FTSMUL, ftsmul)
3794DO_FP3(FRECPS, recps)
3795DO_FP3(FRSQRTS, rsqrts)
3796
3797#undef DO_FP3
3798
ec3b87c2
RH
3799/*
3800 *** SVE Floating Point Arithmetic - Predicated Group
3801 */
3802
3803static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
3804 gen_helper_gvec_4_ptr *fn)
3805{
3806 if (fn == NULL) {
3807 return false;
3808 }
3809 if (sve_access_check(s)) {
3810 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3811 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
ec3b87c2
RH
3812 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3813 vec_full_reg_offset(s, a->rn),
3814 vec_full_reg_offset(s, a->rm),
3815 pred_full_reg_offset(s, a->pg),
3816 status, vsz, vsz, 0, fn);
3817 tcg_temp_free_ptr(status);
3818 }
3819 return true;
3820}
3821
3822#define DO_FP3(NAME, name) \
3a7be554 3823static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
ec3b87c2
RH
3824{ \
3825 static gen_helper_gvec_4_ptr * const fns[4] = { \
3826 NULL, gen_helper_sve_##name##_h, \
3827 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
3828 }; \
3829 return do_zpzz_fp(s, a, fns[a->esz]); \
3830}
3831
3832DO_FP3(FADD_zpzz, fadd)
3833DO_FP3(FSUB_zpzz, fsub)
3834DO_FP3(FMUL_zpzz, fmul)
3835DO_FP3(FMIN_zpzz, fmin)
3836DO_FP3(FMAX_zpzz, fmax)
3837DO_FP3(FMINNM_zpzz, fminnum)
3838DO_FP3(FMAXNM_zpzz, fmaxnum)
3839DO_FP3(FABD, fabd)
3840DO_FP3(FSCALE, fscalbn)
3841DO_FP3(FDIV, fdiv)
3842DO_FP3(FMULX, fmulx)
3843
3844#undef DO_FP3
8092c6a3 3845
cc48affe
RH
3846typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr,
3847 TCGv_i64, TCGv_ptr, TCGv_i32);
3848
3849static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
3850 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
3851{
3852 unsigned vsz = vec_full_reg_size(s);
3853 TCGv_ptr t_zd, t_zn, t_pg, status;
3854 TCGv_i32 desc;
3855
3856 t_zd = tcg_temp_new_ptr();
3857 t_zn = tcg_temp_new_ptr();
3858 t_pg = tcg_temp_new_ptr();
3859 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
3860 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
3861 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
3862
cdfb22bb 3863 status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
cc48affe
RH
3864 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
3865 fn(t_zd, t_zn, t_pg, scalar, status, desc);
3866
3867 tcg_temp_free_i32(desc);
3868 tcg_temp_free_ptr(status);
3869 tcg_temp_free_ptr(t_pg);
3870 tcg_temp_free_ptr(t_zn);
3871 tcg_temp_free_ptr(t_zd);
3872}
3873
3874static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
3875 gen_helper_sve_fp2scalar *fn)
3876{
3877 TCGv_i64 temp = tcg_const_i64(imm);
3878 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, temp, fn);
3879 tcg_temp_free_i64(temp);
3880}
3881
3882#define DO_FP_IMM(NAME, name, const0, const1) \
3a7be554 3883static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \
cc48affe
RH
3884{ \
3885 static gen_helper_sve_fp2scalar * const fns[3] = { \
3886 gen_helper_sve_##name##_h, \
3887 gen_helper_sve_##name##_s, \
3888 gen_helper_sve_##name##_d \
3889 }; \
3890 static uint64_t const val[3][2] = { \
3891 { float16_##const0, float16_##const1 }, \
3892 { float32_##const0, float32_##const1 }, \
3893 { float64_##const0, float64_##const1 }, \
3894 }; \
3895 if (a->esz == 0) { \
3896 return false; \
3897 } \
3898 if (sve_access_check(s)) { \
3899 do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \
3900 } \
3901 return true; \
3902}
3903
cc48affe
RH
3904DO_FP_IMM(FADD, fadds, half, one)
3905DO_FP_IMM(FSUB, fsubs, half, one)
3906DO_FP_IMM(FMUL, fmuls, half, two)
3907DO_FP_IMM(FSUBR, fsubrs, half, one)
3908DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
3909DO_FP_IMM(FMINNM, fminnms, zero, one)
3910DO_FP_IMM(FMAX, fmaxs, zero, one)
3911DO_FP_IMM(FMIN, fmins, zero, one)
3912
3913#undef DO_FP_IMM
3914
abfdefd5
RH
3915static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
3916 gen_helper_gvec_4_ptr *fn)
3917{
3918 if (fn == NULL) {
3919 return false;
3920 }
3921 if (sve_access_check(s)) {
3922 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3923 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
abfdefd5
RH
3924 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
3925 vec_full_reg_offset(s, a->rn),
3926 vec_full_reg_offset(s, a->rm),
3927 pred_full_reg_offset(s, a->pg),
3928 status, vsz, vsz, 0, fn);
3929 tcg_temp_free_ptr(status);
3930 }
3931 return true;
3932}
3933
3934#define DO_FPCMP(NAME, name) \
3a7be554 3935static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \
abfdefd5
RH
3936{ \
3937 static gen_helper_gvec_4_ptr * const fns[4] = { \
3938 NULL, gen_helper_sve_##name##_h, \
3939 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
3940 }; \
3941 return do_fp_cmp(s, a, fns[a->esz]); \
3942}
3943
3944DO_FPCMP(FCMGE, fcmge)
3945DO_FPCMP(FCMGT, fcmgt)
3946DO_FPCMP(FCMEQ, fcmeq)
3947DO_FPCMP(FCMNE, fcmne)
3948DO_FPCMP(FCMUO, fcmuo)
3949DO_FPCMP(FACGE, facge)
3950DO_FPCMP(FACGT, facgt)
3951
3952#undef DO_FPCMP
3953
3a7be554 3954static bool trans_FCADD(DisasContext *s, arg_FCADD *a)
76a9d9cd
RH
3955{
3956 static gen_helper_gvec_4_ptr * const fns[3] = {
3957 gen_helper_sve_fcadd_h,
3958 gen_helper_sve_fcadd_s,
3959 gen_helper_sve_fcadd_d
3960 };
3961
3962 if (a->esz == 0) {
3963 return false;
3964 }
3965 if (sve_access_check(s)) {
3966 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3967 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
76a9d9cd
RH
3968 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3969 vec_full_reg_offset(s, a->rn),
3970 vec_full_reg_offset(s, a->rm),
3971 pred_full_reg_offset(s, a->pg),
3972 status, vsz, vsz, a->rot, fns[a->esz - 1]);
3973 tcg_temp_free_ptr(status);
3974 }
3975 return true;
3976}
3977
08975da9
RH
3978static bool do_fmla(DisasContext *s, arg_rprrr_esz *a,
3979 gen_helper_gvec_5_ptr *fn)
6ceabaad 3980{
08975da9 3981 if (a->esz == 0) {
6ceabaad
RH
3982 return false;
3983 }
08975da9
RH
3984 if (sve_access_check(s)) {
3985 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 3986 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
08975da9
RH
3987 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
3988 vec_full_reg_offset(s, a->rn),
3989 vec_full_reg_offset(s, a->rm),
3990 vec_full_reg_offset(s, a->ra),
3991 pred_full_reg_offset(s, a->pg),
3992 status, vsz, vsz, 0, fn);
3993 tcg_temp_free_ptr(status);
6ceabaad 3994 }
6ceabaad
RH
3995 return true;
3996}
3997
3998#define DO_FMLA(NAME, name) \
3a7be554 3999static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \
6ceabaad 4000{ \
08975da9 4001 static gen_helper_gvec_5_ptr * const fns[4] = { \
6ceabaad
RH
4002 NULL, gen_helper_sve_##name##_h, \
4003 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4004 }; \
4005 return do_fmla(s, a, fns[a->esz]); \
4006}
4007
4008DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
4009DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
4010DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
4011DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
4012
4013#undef DO_FMLA
4014
3a7be554 4015static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
05f48bab 4016{
08975da9
RH
4017 static gen_helper_gvec_5_ptr * const fns[4] = {
4018 NULL,
05f48bab
RH
4019 gen_helper_sve_fcmla_zpzzz_h,
4020 gen_helper_sve_fcmla_zpzzz_s,
4021 gen_helper_sve_fcmla_zpzzz_d,
4022 };
4023
4024 if (a->esz == 0) {
4025 return false;
4026 }
4027 if (sve_access_check(s)) {
4028 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 4029 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
08975da9
RH
4030 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
4031 vec_full_reg_offset(s, a->rn),
4032 vec_full_reg_offset(s, a->rm),
4033 vec_full_reg_offset(s, a->ra),
4034 pred_full_reg_offset(s, a->pg),
4035 status, vsz, vsz, a->rot, fns[a->esz]);
4036 tcg_temp_free_ptr(status);
05f48bab
RH
4037 }
4038 return true;
4039}
4040
3a7be554 4041static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
18fc2405
RH
4042{
4043 static gen_helper_gvec_3_ptr * const fns[2] = {
4044 gen_helper_gvec_fcmlah_idx,
4045 gen_helper_gvec_fcmlas_idx,
4046 };
4047
4048 tcg_debug_assert(a->esz == 1 || a->esz == 2);
4049 tcg_debug_assert(a->rd == a->ra);
4050 if (sve_access_check(s)) {
4051 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 4052 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
18fc2405
RH
4053 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4054 vec_full_reg_offset(s, a->rn),
4055 vec_full_reg_offset(s, a->rm),
4056 status, vsz, vsz,
4057 a->index * 4 + a->rot,
4058 fns[a->esz - 1]);
4059 tcg_temp_free_ptr(status);
4060 }
4061 return true;
4062}
4063
8092c6a3
RH
4064/*
4065 *** SVE Floating Point Unary Operations Predicated Group
4066 */
4067
4068static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg,
4069 bool is_fp16, gen_helper_gvec_3_ptr *fn)
4070{
4071 if (sve_access_check(s)) {
4072 unsigned vsz = vec_full_reg_size(s);
cdfb22bb 4073 TCGv_ptr status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
8092c6a3
RH
4074 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
4075 vec_full_reg_offset(s, rn),
4076 pred_full_reg_offset(s, pg),
4077 status, vsz, vsz, 0, fn);
4078 tcg_temp_free_ptr(status);
4079 }
4080 return true;
4081}
4082
3a7be554 4083static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a)
46d33d1e 4084{
e4ab5124 4085 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh);
46d33d1e
RH
4086}
4087
3a7be554 4088static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
46d33d1e
RH
4089{
4090 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
4091}
4092
3a7be554 4093static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
46d33d1e 4094{
e4ab5124 4095 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
46d33d1e
RH
4096}
4097
3a7be554 4098static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a)
46d33d1e
RH
4099{
4100 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd);
4101}
4102
3a7be554 4103static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a)
46d33d1e
RH
4104{
4105 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds);
4106}
4107
3a7be554 4108static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a)
46d33d1e
RH
4109{
4110 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd);
4111}
4112
3a7be554 4113static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4114{
4115 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh);
4116}
4117
3a7be554 4118static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4119{
4120 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh);
4121}
4122
3a7be554 4123static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4124{
4125 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs);
4126}
4127
3a7be554 4128static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4129{
4130 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs);
4131}
4132
3a7be554 4133static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4134{
4135 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd);
4136}
4137
3a7be554 4138static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4139{
4140 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd);
4141}
4142
3a7be554 4143static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4144{
4145 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss);
4146}
4147
3a7be554 4148static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4149{
4150 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss);
4151}
4152
3a7be554 4153static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4154{
4155 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd);
4156}
4157
3a7be554 4158static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4159{
4160 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd);
4161}
4162
3a7be554 4163static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4164{
4165 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds);
4166}
4167
3a7be554 4168static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4169{
4170 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds);
4171}
4172
3a7be554 4173static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4174{
4175 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd);
4176}
4177
3a7be554 4178static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a)
df4de1af
RH
4179{
4180 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd);
4181}
4182
cda3c753
RH
4183static gen_helper_gvec_3_ptr * const frint_fns[3] = {
4184 gen_helper_sve_frint_h,
4185 gen_helper_sve_frint_s,
4186 gen_helper_sve_frint_d
4187};
4188
3a7be554 4189static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4190{
4191 if (a->esz == 0) {
4192 return false;
4193 }
4194 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16,
4195 frint_fns[a->esz - 1]);
4196}
4197
3a7be554 4198static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4199{
4200 static gen_helper_gvec_3_ptr * const fns[3] = {
4201 gen_helper_sve_frintx_h,
4202 gen_helper_sve_frintx_s,
4203 gen_helper_sve_frintx_d
4204 };
4205 if (a->esz == 0) {
4206 return false;
4207 }
4208 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4209}
4210
4211static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode)
4212{
4213 if (a->esz == 0) {
4214 return false;
4215 }
4216 if (sve_access_check(s)) {
4217 unsigned vsz = vec_full_reg_size(s);
4218 TCGv_i32 tmode = tcg_const_i32(mode);
cdfb22bb 4219 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
cda3c753
RH
4220
4221 gen_helper_set_rmode(tmode, tmode, status);
4222
4223 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4224 vec_full_reg_offset(s, a->rn),
4225 pred_full_reg_offset(s, a->pg),
4226 status, vsz, vsz, 0, frint_fns[a->esz - 1]);
4227
4228 gen_helper_set_rmode(tmode, tmode, status);
4229 tcg_temp_free_i32(tmode);
4230 tcg_temp_free_ptr(status);
4231 }
4232 return true;
4233}
4234
3a7be554 4235static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4236{
4237 return do_frint_mode(s, a, float_round_nearest_even);
4238}
4239
3a7be554 4240static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4241{
4242 return do_frint_mode(s, a, float_round_up);
4243}
4244
3a7be554 4245static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4246{
4247 return do_frint_mode(s, a, float_round_down);
4248}
4249
3a7be554 4250static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4251{
4252 return do_frint_mode(s, a, float_round_to_zero);
4253}
4254
3a7be554 4255static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a)
cda3c753
RH
4256{
4257 return do_frint_mode(s, a, float_round_ties_away);
4258}
4259
3a7be554 4260static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a)
ec5b375b
RH
4261{
4262 static gen_helper_gvec_3_ptr * const fns[3] = {
4263 gen_helper_sve_frecpx_h,
4264 gen_helper_sve_frecpx_s,
4265 gen_helper_sve_frecpx_d
4266 };
4267 if (a->esz == 0) {
4268 return false;
4269 }
4270 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4271}
4272
3a7be554 4273static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a)
ec5b375b
RH
4274{
4275 static gen_helper_gvec_3_ptr * const fns[3] = {
4276 gen_helper_sve_fsqrt_h,
4277 gen_helper_sve_fsqrt_s,
4278 gen_helper_sve_fsqrt_d
4279 };
4280 if (a->esz == 0) {
4281 return false;
4282 }
4283 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4284}
4285
3a7be554 4286static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a)
8092c6a3
RH
4287{
4288 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh);
4289}
4290
3a7be554 4291static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a)
8092c6a3
RH
4292{
4293 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh);
4294}
4295
3a7be554 4296static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a)
8092c6a3
RH
4297{
4298 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh);
4299}
4300
/* SCVTF: signed integer to float, word -> single (per helper naming). */
static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss);
}
4305
/* SCVTF: signed integer to float, doubleword -> single (per helper naming). */
static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds);
}
4310
/* SCVTF: signed integer to float, word -> double (per helper naming). */
static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd);
}
4315
/* SCVTF: signed integer to float, doubleword -> double (per helper naming). */
static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd);
}
4320
/* UCVTF: unsigned integer to float, half -> half (per helper naming). */
static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh);
}
4325
/* UCVTF: unsigned integer to float, word -> half (per helper naming). */
static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh);
}
4330
/* UCVTF: unsigned integer to float, doubleword -> half (per helper naming). */
static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh);
}
4335
/* UCVTF: unsigned integer to float, word -> single (per helper naming). */
static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss);
}
4340
/* UCVTF: unsigned integer to float, doubleword -> single (per helper naming). */
static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds);
}
4345
/* UCVTF: unsigned integer to float, word -> double (per helper naming). */
static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd);
}
4350
/* UCVTF: unsigned integer to float, doubleword -> double (per helper naming). */
static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd);
}
4355
d1822297
RH
4356/*
4357 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
4358 */
4359
/* Subroutine loading a vector register at VOFS of LEN bytes.
 * The load should begin at the address Rn + IMM.
 */

static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
{
    int len_align = QEMU_ALIGN_DOWN(len, 8);
    int len_remain = len % 8;
    /* Number of 8-byte pieces plus one extra piece if there is a tail. */
    int nparts = len / 8 + ctpop8(len_remain);
    int midx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr, t0, t1;

    /* One MTE check covers the entire LEN-byte transfer. */
    dirty_addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
    tcg_temp_free_i64(dirty_addr);

    /*
     * Note that unpredicated load/store of vector/predicate registers
     * are defined as a stream of bytes, which equates to little-endian
     * operations on larger quantities.
     * Attempt to keep code expansion to a minimum by limiting the
     * amount of unrolling done.
     */
    if (nparts <= 4) {
        /* Small transfer: fully unrolled sequence of 8-byte loads. */
        int i;

        t0 = tcg_temp_new_i64();
        for (i = 0; i < len_align; i += 8) {
            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
            tcg_gen_st_i64(t0, cpu_env, vofs + i);
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
        }
        tcg_temp_free_i64(t0);
    } else {
        /* Large transfer: emit a TCG-level loop over 8-byte units. */
        TCGLabel *loop = gen_new_label();
        TCGv_ptr tp, i = tcg_const_local_ptr(0);

        /* Copy the clean address into a local temp, live across the loop. */
        t0 = clean_addr;
        clean_addr = new_tmp_a64_local(s);
        tcg_gen_mov_i64(clean_addr, t0);

        gen_set_label(loop);

        t0 = tcg_temp_new_i64();
        tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
        tcg_gen_addi_i64(clean_addr, clean_addr, 8);

        tp = tcg_temp_new_ptr();
        tcg_gen_add_ptr(tp, cpu_env, i);
        tcg_gen_addi_ptr(i, i, 8);
        tcg_gen_st_i64(t0, tp, vofs);
        tcg_temp_free_ptr(tp);
        tcg_temp_free_i64(t0);

        tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
        tcg_temp_free_ptr(i);
    }

    /*
     * Predicate register loads can be any multiple of 2.
     * Note that we still store the entire 64-bit unit into cpu_env.
     */
    if (len_remain) {
        t0 = tcg_temp_new_i64();
        switch (len_remain) {
        case 2:
        case 4:
        case 8:
            /* Power-of-two tail: one load of exactly that size. */
            tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
                                MO_LE | ctz32(len_remain));
            break;

        case 6:
            /* 6-byte tail: 4-byte load followed by 2-byte load, merged. */
            t1 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
            tcg_gen_addi_i64(clean_addr, clean_addr, 4);
            tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
            tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
            tcg_temp_free_i64(t1);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
        tcg_temp_free_i64(t0);
    }
}
4450
5047c204 4451/* Similarly for stores. */
19f2acc9 4452static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
5047c204 4453{
19f2acc9
RH
4454 int len_align = QEMU_ALIGN_DOWN(len, 8);
4455 int len_remain = len % 8;
4456 int nparts = len / 8 + ctpop8(len_remain);
5047c204 4457 int midx = get_mem_index(s);
bba87d0a 4458 TCGv_i64 dirty_addr, clean_addr, t0;
5047c204 4459
bba87d0a
RH
4460 dirty_addr = tcg_temp_new_i64();
4461 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
33e74c31 4462 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
bba87d0a 4463 tcg_temp_free_i64(dirty_addr);
5047c204
RH
4464
4465 /* Note that unpredicated load/store of vector/predicate registers
4466 * are defined as a stream of bytes, which equates to little-endian
4467 * operations on larger quantities. There is no nice way to force
4468 * a little-endian store for aarch64_be-linux-user out of line.
4469 *
4470 * Attempt to keep code expansion to a minimum by limiting the
4471 * amount of unrolling done.
4472 */
4473 if (nparts <= 4) {
4474 int i;
4475
bba87d0a 4476 t0 = tcg_temp_new_i64();
5047c204
RH
4477 for (i = 0; i < len_align; i += 8) {
4478 tcg_gen_ld_i64(t0, cpu_env, vofs + i);
bba87d0a 4479 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
d8227b09 4480 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
5047c204 4481 }
bba87d0a 4482 tcg_temp_free_i64(t0);
5047c204
RH
4483 } else {
4484 TCGLabel *loop = gen_new_label();
bba87d0a 4485 TCGv_ptr tp, i = tcg_const_local_ptr(0);
5047c204 4486
bba87d0a
RH
4487 /* Copy the clean address into a local temp, live across the loop. */
4488 t0 = clean_addr;
4b4dc975 4489 clean_addr = new_tmp_a64_local(s);
bba87d0a 4490 tcg_gen_mov_i64(clean_addr, t0);
5047c204 4491
bba87d0a 4492 gen_set_label(loop);
5047c204 4493
bba87d0a
RH
4494 t0 = tcg_temp_new_i64();
4495 tp = tcg_temp_new_ptr();
4496 tcg_gen_add_ptr(tp, cpu_env, i);
4497 tcg_gen_ld_i64(t0, tp, vofs);
5047c204 4498 tcg_gen_addi_ptr(i, i, 8);
bba87d0a
RH
4499 tcg_temp_free_ptr(tp);
4500
4501 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
4502 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4503 tcg_temp_free_i64(t0);
5047c204
RH
4504
4505 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4506 tcg_temp_free_ptr(i);
4507 }
4508
4509 /* Predicate register stores can be any multiple of 2. */
4510 if (len_remain) {
bba87d0a 4511 t0 = tcg_temp_new_i64();
5047c204 4512 tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
5047c204
RH
4513
4514 switch (len_remain) {
4515 case 2:
4516 case 4:
4517 case 8:
bba87d0a
RH
4518 tcg_gen_qemu_st_i64(t0, clean_addr, midx,
4519 MO_LE | ctz32(len_remain));
5047c204
RH
4520 break;
4521
4522 case 6:
bba87d0a
RH
4523 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
4524 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
5047c204 4525 tcg_gen_shri_i64(t0, t0, 32);
bba87d0a 4526 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
5047c204
RH
4527 break;
4528
4529 default:
4530 g_assert_not_reached();
4531 }
bba87d0a 4532 tcg_temp_free_i64(t0);
5047c204 4533 }
5047c204
RH
4534}
4535
3a7be554 4536static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
d1822297
RH
4537{
4538 if (sve_access_check(s)) {
4539 int size = vec_full_reg_size(s);
4540 int off = vec_full_reg_offset(s, a->rd);
4541 do_ldr(s, off, size, a->rn, a->imm * size);
4542 }
4543 return true;
4544}
4545
3a7be554 4546static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
d1822297
RH
4547{
4548 if (sve_access_check(s)) {
4549 int size = pred_full_reg_size(s);
4550 int off = pred_full_reg_offset(s, a->rd);
4551 do_ldr(s, off, size, a->rn, a->imm * size);
4552 }
4553 return true;
4554}
c4e7c493 4555
3a7be554 4556static bool trans_STR_zri(DisasContext *s, arg_rri *a)
5047c204
RH
4557{
4558 if (sve_access_check(s)) {
4559 int size = vec_full_reg_size(s);
4560 int off = vec_full_reg_offset(s, a->rd);
4561 do_str(s, off, size, a->rn, a->imm * size);
4562 }
4563 return true;
4564}
4565
3a7be554 4566static bool trans_STR_pri(DisasContext *s, arg_rri *a)
5047c204
RH
4567{
4568 if (sve_access_check(s)) {
4569 int size = pred_full_reg_size(s);
4570 int off = pred_full_reg_offset(s, a->rd);
4571 do_str(s, off, size, a->rn, a->imm * size);
4572 }
4573 return true;
4574}
4575
c4e7c493
RH
4576/*
4577 *** SVE Memory - Contiguous Load Group
4578 */
4579
/* The memory mode of the dtype.  Indexed by the 4-bit dtype field of
 * the contiguous load encodings; signed modes mark sign-extending loads.
 */
static const MemOp dtype_mop[16] = {
    MO_UB, MO_UB, MO_UB, MO_UB,
    MO_SL, MO_UW, MO_UW, MO_UW,
    MO_SW, MO_SW, MO_UL, MO_UL,
    MO_SB, MO_SB, MO_SB, MO_Q
};

/* Memory access size (log2 bytes) of the dtype. */
#define dtype_msz(x) (dtype_mop[x] & MO_SIZE)

/* The vector element size of dtype (log2 bytes). */
static const uint8_t dtype_esz[16] = {
    0, 1, 2, 3,
    3, 1, 2, 3,
    3, 2, 2, 3,
    3, 2, 1, 3
};
4597
/*
 * Emit a call to the contiguous load/store helper FN for register Zt,
 * governed by predicate Pg, starting at ADDR.  MTE_N is the number of
 * registers transferred (used to size the MTE check); IS_WRITE
 * distinguishes stores from loads for the MTE descriptor.
 */
static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
                       int dtype, uint32_t mte_n, bool is_write,
                       gen_helper_gvec_mem *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_pg;
    TCGv_i32 t_desc;
    int desc = 0;

    /*
     * For e.g. LD4, there are not enough arguments to pass all 4
     * registers as pointers, so encode the regno into the data field.
     * For consistency, do this even for LD1.
     */
    if (s->mte_active[0]) {
        /* Pack the MTE parameters above SVE_MTEDESC_SHIFT in the
         * descriptor; the helper unpacks them to perform tag checks.
         */
        int msz = dtype_msz(dtype);

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
        desc <<= SVE_MTEDESC_SHIFT;
    } else {
        /* MTE inactive: strip the tag bits from the address up front. */
        addr = clean_data_tbi(s, addr);
    }

    desc = simd_desc(vsz, vsz, zt | desc);
    t_desc = tcg_const_i32(desc);
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
    fn(cpu_env, t_pg, addr, t_desc);

    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(t_desc);
}
4635
/*
 * Dispatch a predicated contiguous load (LD1..LD4) to the helper selected
 * by MTE state, target endianness, dtype and register count NREG.
 */
static void do_ld_zpa(DisasContext *s, int zt, int pg,
                      TCGv_i64 addr, int dtype, int nreg)
{
    /* Indexed [mte][be][dtype][nreg]; NULL entries are unreachable. */
    static gen_helper_gvec_mem * const fns[2][2][16][4] = {
        { /* mte inactive, little-endian */
          { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
              gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
            { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },

            { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
              gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
            { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },

            { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
              gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
            { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },

            { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
              gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },

          /* mte inactive, big-endian */
          { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
              gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
            { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },

            { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
              gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
            { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },

            { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
              gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
            { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },

            { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
            { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
              gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },

        { /* mte active, little-endian */
          { { gen_helper_sve_ld1bb_r_mte,
              gen_helper_sve_ld2bb_r_mte,
              gen_helper_sve_ld3bb_r_mte,
              gen_helper_sve_ld4bb_r_mte },
            { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },

            { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1hh_le_r_mte,
              gen_helper_sve_ld2hh_le_r_mte,
              gen_helper_sve_ld3hh_le_r_mte,
              gen_helper_sve_ld4hh_le_r_mte },
            { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },

            { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1ss_le_r_mte,
              gen_helper_sve_ld2ss_le_r_mte,
              gen_helper_sve_ld3ss_le_r_mte,
              gen_helper_sve_ld4ss_le_r_mte },
            { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },

            { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1dd_le_r_mte,
              gen_helper_sve_ld2dd_le_r_mte,
              gen_helper_sve_ld3dd_le_r_mte,
              gen_helper_sve_ld4dd_le_r_mte } },

          /* mte active, big-endian */
          { { gen_helper_sve_ld1bb_r_mte,
              gen_helper_sve_ld2bb_r_mte,
              gen_helper_sve_ld3bb_r_mte,
              gen_helper_sve_ld4bb_r_mte },
            { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },

            { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1hh_be_r_mte,
              gen_helper_sve_ld2hh_be_r_mte,
              gen_helper_sve_ld3hh_be_r_mte,
              gen_helper_sve_ld4hh_be_r_mte },
            { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },

            { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1ss_be_r_mte,
              gen_helper_sve_ld2ss_be_r_mte,
              gen_helper_sve_ld3ss_be_r_mte,
              gen_helper_sve_ld4ss_be_r_mte },
            { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },

            { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
            { gen_helper_sve_ld1dd_be_r_mte,
              gen_helper_sve_ld2dd_be_r_mte,
              gen_helper_sve_ld3dd_be_r_mte,
              gen_helper_sve_ld4dd_be_r_mte } } },
    };
    gen_helper_gvec_mem *fn
        = fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];

    /*
     * While there are holes in the table, they are not
     * accessible via the instruction encoding.
     */
    assert(fn != NULL);
    do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
}
4766
3a7be554 4767static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
c4e7c493
RH
4768{
4769 if (a->rm == 31) {
4770 return false;
4771 }
4772 if (sve_access_check(s)) {
4773 TCGv_i64 addr = new_tmp_a64(s);
50ef1cbf 4774 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
c4e7c493
RH
4775 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4776 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4777 }
4778 return true;
4779}
4780
3a7be554 4781static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
c4e7c493
RH
4782{
4783 if (sve_access_check(s)) {
4784 int vsz = vec_full_reg_size(s);
4785 int elements = vsz >> dtype_esz[a->dtype];
4786 TCGv_i64 addr = new_tmp_a64(s);
4787
4788 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
4789 (a->imm * elements * (a->nreg + 1))
4790 << dtype_msz(a->dtype));
4791 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4792 }
4793 return true;
4794}
e2654d75 4795
/* LDFF1 (scalar plus scalar): first-fault contiguous load. */
static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
{
    /* Indexed [mte][be][dtype]. */
    static gen_helper_gvec_mem * const fns[2][2][16] = {
        { /* mte inactive, little-endian */
          { gen_helper_sve_ldff1bb_r,
            gen_helper_sve_ldff1bhu_r,
            gen_helper_sve_ldff1bsu_r,
            gen_helper_sve_ldff1bdu_r,

            gen_helper_sve_ldff1sds_le_r,
            gen_helper_sve_ldff1hh_le_r,
            gen_helper_sve_ldff1hsu_le_r,
            gen_helper_sve_ldff1hdu_le_r,

            gen_helper_sve_ldff1hds_le_r,
            gen_helper_sve_ldff1hss_le_r,
            gen_helper_sve_ldff1ss_le_r,
            gen_helper_sve_ldff1sdu_le_r,

            gen_helper_sve_ldff1bds_r,
            gen_helper_sve_ldff1bss_r,
            gen_helper_sve_ldff1bhs_r,
            gen_helper_sve_ldff1dd_le_r },

          /* mte inactive, big-endian */
          { gen_helper_sve_ldff1bb_r,
            gen_helper_sve_ldff1bhu_r,
            gen_helper_sve_ldff1bsu_r,
            gen_helper_sve_ldff1bdu_r,

            gen_helper_sve_ldff1sds_be_r,
            gen_helper_sve_ldff1hh_be_r,
            gen_helper_sve_ldff1hsu_be_r,
            gen_helper_sve_ldff1hdu_be_r,

            gen_helper_sve_ldff1hds_be_r,
            gen_helper_sve_ldff1hss_be_r,
            gen_helper_sve_ldff1ss_be_r,
            gen_helper_sve_ldff1sdu_be_r,

            gen_helper_sve_ldff1bds_r,
            gen_helper_sve_ldff1bss_r,
            gen_helper_sve_ldff1bhs_r,
            gen_helper_sve_ldff1dd_be_r } },

        { /* mte active, little-endian */
          { gen_helper_sve_ldff1bb_r_mte,
            gen_helper_sve_ldff1bhu_r_mte,
            gen_helper_sve_ldff1bsu_r_mte,
            gen_helper_sve_ldff1bdu_r_mte,

            gen_helper_sve_ldff1sds_le_r_mte,
            gen_helper_sve_ldff1hh_le_r_mte,
            gen_helper_sve_ldff1hsu_le_r_mte,
            gen_helper_sve_ldff1hdu_le_r_mte,

            gen_helper_sve_ldff1hds_le_r_mte,
            gen_helper_sve_ldff1hss_le_r_mte,
            gen_helper_sve_ldff1ss_le_r_mte,
            gen_helper_sve_ldff1sdu_le_r_mte,

            gen_helper_sve_ldff1bds_r_mte,
            gen_helper_sve_ldff1bss_r_mte,
            gen_helper_sve_ldff1bhs_r_mte,
            gen_helper_sve_ldff1dd_le_r_mte },

          /* mte active, big-endian */
          { gen_helper_sve_ldff1bb_r_mte,
            gen_helper_sve_ldff1bhu_r_mte,
            gen_helper_sve_ldff1bsu_r_mte,
            gen_helper_sve_ldff1bdu_r_mte,

            gen_helper_sve_ldff1sds_be_r_mte,
            gen_helper_sve_ldff1hh_be_r_mte,
            gen_helper_sve_ldff1hsu_be_r_mte,
            gen_helper_sve_ldff1hdu_be_r_mte,

            gen_helper_sve_ldff1hds_be_r_mte,
            gen_helper_sve_ldff1hss_be_r_mte,
            gen_helper_sve_ldff1ss_be_r_mte,
            gen_helper_sve_ldff1sdu_be_r_mte,

            gen_helper_sve_ldff1bds_r_mte,
            gen_helper_sve_ldff1bss_r_mte,
            gen_helper_sve_ldff1bhs_r_mte,
            gen_helper_sve_ldff1dd_be_r_mte } },
    };

    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
                   fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
    }
    return true;
}
4893
/* LDNF1 (scalar plus immediate): non-fault contiguous load. */
static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
{
    /* Indexed [mte][be][dtype]. */
    static gen_helper_gvec_mem * const fns[2][2][16] = {
        { /* mte inactive, little-endian */
          { gen_helper_sve_ldnf1bb_r,
            gen_helper_sve_ldnf1bhu_r,
            gen_helper_sve_ldnf1bsu_r,
            gen_helper_sve_ldnf1bdu_r,

            gen_helper_sve_ldnf1sds_le_r,
            gen_helper_sve_ldnf1hh_le_r,
            gen_helper_sve_ldnf1hsu_le_r,
            gen_helper_sve_ldnf1hdu_le_r,

            gen_helper_sve_ldnf1hds_le_r,
            gen_helper_sve_ldnf1hss_le_r,
            gen_helper_sve_ldnf1ss_le_r,
            gen_helper_sve_ldnf1sdu_le_r,

            gen_helper_sve_ldnf1bds_r,
            gen_helper_sve_ldnf1bss_r,
            gen_helper_sve_ldnf1bhs_r,
            gen_helper_sve_ldnf1dd_le_r },

          /* mte inactive, big-endian */
          { gen_helper_sve_ldnf1bb_r,
            gen_helper_sve_ldnf1bhu_r,
            gen_helper_sve_ldnf1bsu_r,
            gen_helper_sve_ldnf1bdu_r,

            gen_helper_sve_ldnf1sds_be_r,
            gen_helper_sve_ldnf1hh_be_r,
            gen_helper_sve_ldnf1hsu_be_r,
            gen_helper_sve_ldnf1hdu_be_r,

            gen_helper_sve_ldnf1hds_be_r,
            gen_helper_sve_ldnf1hss_be_r,
            gen_helper_sve_ldnf1ss_be_r,
            gen_helper_sve_ldnf1sdu_be_r,

            gen_helper_sve_ldnf1bds_r,
            gen_helper_sve_ldnf1bss_r,
            gen_helper_sve_ldnf1bhs_r,
            gen_helper_sve_ldnf1dd_be_r } },

        { /* mte active, little-endian */
          { gen_helper_sve_ldnf1bb_r_mte,
            gen_helper_sve_ldnf1bhu_r_mte,
            gen_helper_sve_ldnf1bsu_r_mte,
            gen_helper_sve_ldnf1bdu_r_mte,

            gen_helper_sve_ldnf1sds_le_r_mte,
            gen_helper_sve_ldnf1hh_le_r_mte,
            gen_helper_sve_ldnf1hsu_le_r_mte,
            gen_helper_sve_ldnf1hdu_le_r_mte,

            gen_helper_sve_ldnf1hds_le_r_mte,
            gen_helper_sve_ldnf1hss_le_r_mte,
            gen_helper_sve_ldnf1ss_le_r_mte,
            gen_helper_sve_ldnf1sdu_le_r_mte,

            gen_helper_sve_ldnf1bds_r_mte,
            gen_helper_sve_ldnf1bss_r_mte,
            gen_helper_sve_ldnf1bhs_r_mte,
            gen_helper_sve_ldnf1dd_le_r_mte },

          /* mte active, big-endian */
          { gen_helper_sve_ldnf1bb_r_mte,
            gen_helper_sve_ldnf1bhu_r_mte,
            gen_helper_sve_ldnf1bsu_r_mte,
            gen_helper_sve_ldnf1bdu_r_mte,

            gen_helper_sve_ldnf1sds_be_r_mte,
            gen_helper_sve_ldnf1hh_be_r_mte,
            gen_helper_sve_ldnf1hsu_be_r_mte,
            gen_helper_sve_ldnf1hdu_be_r_mte,

            gen_helper_sve_ldnf1hds_be_r_mte,
            gen_helper_sve_ldnf1hss_be_r_mte,
            gen_helper_sve_ldnf1ss_be_r_mte,
            gen_helper_sve_ldnf1sdu_be_r_mte,

            gen_helper_sve_ldnf1bds_r_mte,
            gen_helper_sve_ldnf1bss_r_mte,
            gen_helper_sve_ldnf1bhs_r_mte,
            gen_helper_sve_ldnf1dd_be_r_mte } },
    };

    if (sve_access_check(s)) {
        int vsz = vec_full_reg_size(s);
        int elements = vsz >> dtype_esz[a->dtype];
        int off = (a->imm * elements) << dtype_msz(a->dtype);
        TCGv_i64 addr = new_tmp_a64(s);

        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
                   fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
    }
    return true;
}
1a039c7e 4994
/*
 * Load one 16-byte quadword into Zt under Pg, then replicate it across
 * the whole vector (LD1RQ).  MSZ is the log2 memory element size.
 */
static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
{
    /* Indexed [be][msz]; byte loads have no endian variants. */
    static gen_helper_gvec_mem * const fns[2][4] = {
        { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_le_r,
          gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld1dd_le_r },
        { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_be_r,
          gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld1dd_be_r },
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_pg;
    TCGv_i32 t_desc;
    int desc, poff;

    /* Load the first quadword using the normal predicated load helpers. */
    desc = simd_desc(16, 16, zt);
    t_desc = tcg_const_i32(desc);

    poff = pred_full_reg_offset(s, pg);
    if (vsz > 16) {
        /*
         * Zero-extend the first 16 bits of the predicate into a temporary.
         * This avoids triggering an assert making sure we don't have bits
         * set within a predicate beyond VQ, but we have lowered VQ to 1
         * for this load operation.
         */
        TCGv_i64 tmp = tcg_temp_new_i64();
#ifdef HOST_WORDS_BIGENDIAN
        poff += 6;
#endif
        tcg_gen_ld16u_i64(tmp, cpu_env, poff);

        poff = offsetof(CPUARMState, vfp.preg_tmp);
        tcg_gen_st_i64(tmp, cpu_env, poff);
        tcg_temp_free_i64(tmp);
    }

    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_pg, cpu_env, poff);

    fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, t_desc);

    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(t_desc);

    /* Replicate that first quadword. */
    if (vsz > 16) {
        unsigned dofs = vec_full_reg_offset(s, zt);
        tcg_gen_gvec_dup_mem(4, dofs + 16, dofs, vsz - 16, vsz - 16);
    }
}
5045
3a7be554 5046static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
05abe304
RH
5047{
5048 if (a->rm == 31) {
5049 return false;
5050 }
5051 if (sve_access_check(s)) {
5052 int msz = dtype_msz(a->dtype);
5053 TCGv_i64 addr = new_tmp_a64(s);
5054 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
5055 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5056 do_ldrq(s, a->rd, a->pg, addr, msz);
5057 }
5058 return true;
5059}
5060
3a7be554 5061static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
05abe304
RH
5062{
5063 if (sve_access_check(s)) {
5064 TCGv_i64 addr = new_tmp_a64(s);
5065 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
5066 do_ldrq(s, a->rd, a->pg, addr, dtype_msz(a->dtype));
5067 }
5068 return true;
5069}
5070
/* Load and broadcast element. */
static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
{
    unsigned vsz = vec_full_reg_size(s);
    unsigned psz = pred_full_reg_size(s);
    unsigned esz = dtype_esz[a->dtype];
    unsigned msz = dtype_msz(a->dtype);
    TCGLabel *over;
    TCGv_i64 temp, clean_addr;

    if (!sve_access_check(s)) {
        return true;
    }

    /* Branch target that skips the load when the predicate is empty. */
    over = gen_new_label();

    /* If the guarding predicate has no bits set, no load occurs. */
    if (psz <= 8) {
        /* Reduce the pred_esz_masks value simply to reduce the
         * size of the code generated here.
         */
        uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
        temp = tcg_temp_new_i64();
        tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
        tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
        tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
        tcg_temp_free_i64(temp);
    } else {
        /* Wide predicate: a negative "last active" index means empty. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        find_last_active(s, t32, esz, a->pg);
        tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
        tcg_temp_free_i32(t32);
    }

    /* Load the data. */
    temp = tcg_temp_new_i64();
    tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
    clean_addr = gen_mte_check1(s, temp, false, true, msz);

    tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
                        finalize_memop(s, dtype_mop[a->dtype]));

    /* Broadcast to *all* elements. */
    tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
                         vsz, vsz, temp);
    tcg_temp_free_i64(temp);

    /* Zero the inactive elements. */
    gen_set_label(over);
    return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
}
5122
/*
 * Dispatch a predicated contiguous store (ST1..ST4) to the helper
 * selected by MTE state, target endianness, MSZ/ESZ and register
 * count NREG (0 means ST1).
 */
static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
                      int msz, int esz, int nreg)
{
    /* ST1 helpers, indexed [mte][be][msz][esz]; NULL where msz > esz. */
    static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
        { { { gen_helper_sve_st1bb_r,
              gen_helper_sve_st1bh_r,
              gen_helper_sve_st1bs_r,
              gen_helper_sve_st1bd_r },
            { NULL,
              gen_helper_sve_st1hh_le_r,
              gen_helper_sve_st1hs_le_r,
              gen_helper_sve_st1hd_le_r },
            { NULL, NULL,
              gen_helper_sve_st1ss_le_r,
              gen_helper_sve_st1sd_le_r },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_le_r } },
          { { gen_helper_sve_st1bb_r,
              gen_helper_sve_st1bh_r,
              gen_helper_sve_st1bs_r,
              gen_helper_sve_st1bd_r },
            { NULL,
              gen_helper_sve_st1hh_be_r,
              gen_helper_sve_st1hs_be_r,
              gen_helper_sve_st1hd_be_r },
            { NULL, NULL,
              gen_helper_sve_st1ss_be_r,
              gen_helper_sve_st1sd_be_r },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_be_r } } },

        { { { gen_helper_sve_st1bb_r_mte,
              gen_helper_sve_st1bh_r_mte,
              gen_helper_sve_st1bs_r_mte,
              gen_helper_sve_st1bd_r_mte },
            { NULL,
              gen_helper_sve_st1hh_le_r_mte,
              gen_helper_sve_st1hs_le_r_mte,
              gen_helper_sve_st1hd_le_r_mte },
            { NULL, NULL,
              gen_helper_sve_st1ss_le_r_mte,
              gen_helper_sve_st1sd_le_r_mte },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_le_r_mte } },
          { { gen_helper_sve_st1bb_r_mte,
              gen_helper_sve_st1bh_r_mte,
              gen_helper_sve_st1bs_r_mte,
              gen_helper_sve_st1bd_r_mte },
            { NULL,
              gen_helper_sve_st1hh_be_r_mte,
              gen_helper_sve_st1hs_be_r_mte,
              gen_helper_sve_st1hd_be_r_mte },
            { NULL, NULL,
              gen_helper_sve_st1ss_be_r_mte,
              gen_helper_sve_st1sd_be_r_mte },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_be_r_mte } } },
    };
    /* ST2..ST4 helpers, indexed [mte][be][nreg - 1][msz]. */
    static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
        { { { gen_helper_sve_st2bb_r,
              gen_helper_sve_st2hh_le_r,
              gen_helper_sve_st2ss_le_r,
              gen_helper_sve_st2dd_le_r },
            { gen_helper_sve_st3bb_r,
              gen_helper_sve_st3hh_le_r,
              gen_helper_sve_st3ss_le_r,
              gen_helper_sve_st3dd_le_r },
            { gen_helper_sve_st4bb_r,
              gen_helper_sve_st4hh_le_r,
              gen_helper_sve_st4ss_le_r,
              gen_helper_sve_st4dd_le_r } },
          { { gen_helper_sve_st2bb_r,
              gen_helper_sve_st2hh_be_r,
              gen_helper_sve_st2ss_be_r,
              gen_helper_sve_st2dd_be_r },
            { gen_helper_sve_st3bb_r,
              gen_helper_sve_st3hh_be_r,
              gen_helper_sve_st3ss_be_r,
              gen_helper_sve_st3dd_be_r },
            { gen_helper_sve_st4bb_r,
              gen_helper_sve_st4hh_be_r,
              gen_helper_sve_st4ss_be_r,
              gen_helper_sve_st4dd_be_r } } },
        { { { gen_helper_sve_st2bb_r_mte,
              gen_helper_sve_st2hh_le_r_mte,
              gen_helper_sve_st2ss_le_r_mte,
              gen_helper_sve_st2dd_le_r_mte },
            { gen_helper_sve_st3bb_r_mte,
              gen_helper_sve_st3hh_le_r_mte,
              gen_helper_sve_st3ss_le_r_mte,
              gen_helper_sve_st3dd_le_r_mte },
            { gen_helper_sve_st4bb_r_mte,
              gen_helper_sve_st4hh_le_r_mte,
              gen_helper_sve_st4ss_le_r_mte,
              gen_helper_sve_st4dd_le_r_mte } },
          { { gen_helper_sve_st2bb_r_mte,
              gen_helper_sve_st2hh_be_r_mte,
              gen_helper_sve_st2ss_be_r_mte,
              gen_helper_sve_st2dd_be_r_mte },
            { gen_helper_sve_st3bb_r_mte,
              gen_helper_sve_st3hh_be_r_mte,
              gen_helper_sve_st3ss_be_r_mte,
              gen_helper_sve_st3dd_be_r_mte },
            { gen_helper_sve_st4bb_r_mte,
              gen_helper_sve_st4hh_be_r_mte,
              gen_helper_sve_st4ss_be_r_mte,
              gen_helper_sve_st4dd_be_r_mte } } },
    };
    gen_helper_gvec_mem *fn;
    int be = s->be_data == MO_BE;

    if (nreg == 0) {
        /* ST1 */
        fn = fn_single[s->mte_active[0]][be][msz][esz];
        nreg = 1;
    } else {
        /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
        assert(msz == esz);
        fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
    }
    assert(fn != NULL);
    do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
}
5246
3a7be554 5247static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
1a039c7e
RH
5248{
5249 if (a->rm == 31 || a->msz > a->esz) {
5250 return false;
5251 }
5252 if (sve_access_check(s)) {
5253 TCGv_i64 addr = new_tmp_a64(s);
50ef1cbf 5254 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
1a039c7e
RH
5255 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5256 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5257 }
5258 return true;
5259}
5260
3a7be554 5261static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
1a039c7e
RH
5262{
5263 if (a->msz > a->esz) {
5264 return false;
5265 }
5266 if (sve_access_check(s)) {
5267 int vsz = vec_full_reg_size(s);
5268 int elements = vsz >> a->esz;
5269 TCGv_i64 addr = new_tmp_a64(s);
5270
5271 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5272 (a->imm * elements * (a->nreg + 1)) << a->msz);
5273 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5274 }
5275 return true;
5276}
f6dbf62a
RH
5277
5278/*
5279 *** SVE gather loads / scatter stores
5280 */
5281
500d0484 5282static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
d28d12f0 5283 int scale, TCGv_i64 scalar, int msz, bool is_write,
500d0484 5284 gen_helper_gvec_mem_scatter *fn)
f6dbf62a
RH
5285{
5286 unsigned vsz = vec_full_reg_size(s);
f6dbf62a
RH
5287 TCGv_ptr t_zm = tcg_temp_new_ptr();
5288 TCGv_ptr t_pg = tcg_temp_new_ptr();
5289 TCGv_ptr t_zt = tcg_temp_new_ptr();
500d0484 5290 TCGv_i32 t_desc;
d28d12f0 5291 int desc = 0;
500d0484 5292
d28d12f0
RH
5293 if (s->mte_active[0]) {
5294 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
5295 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
5296 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
5297 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
28f32503 5298 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
d28d12f0
RH
5299 desc <<= SVE_MTEDESC_SHIFT;
5300 }
cdecb3fc 5301 desc = simd_desc(vsz, vsz, desc | scale);
500d0484 5302 t_desc = tcg_const_i32(desc);
f6dbf62a
RH
5303
5304 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
5305 tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
5306 tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
500d0484 5307 fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc);
f6dbf62a
RH
5308
5309 tcg_temp_free_ptr(t_zt);
5310 tcg_temp_free_ptr(t_zm);
5311 tcg_temp_free_ptr(t_pg);
500d0484 5312 tcg_temp_free_i32(t_desc);
f6dbf62a
RH
5313}
5314
d28d12f0
RH
/*
 * Helpers for 32-bit element gather loads.
 * Indexed by [mte][be][ff][xs][u][msz]:
 *   mte: MTE checks active; be: big-endian; ff: first-fault variant;
 *   xs: offset form (zsu vs zss per helper naming); u: unsigned extension
 *   of the loaded value (per the ...ss/...su helper suffixes); msz: log2
 *   of the memory element size.  NULL entries are invalid combinations.
 */
static gen_helper_gvec_mem_scatter * const
gather_load_fn32[2][2][2][2][2][3] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { { { gen_helper_sve_ldbss_zsu,
                  gen_helper_sve_ldhss_le_zsu,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu,
                  gen_helper_sve_ldhsu_le_zsu,
                  gen_helper_sve_ldss_le_zsu, } },
              { { gen_helper_sve_ldbss_zss,
                  gen_helper_sve_ldhss_le_zss,
                  NULL, },
                { gen_helper_sve_ldbsu_zss,
                  gen_helper_sve_ldhsu_le_zss,
                  gen_helper_sve_ldss_le_zss, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu,
                  gen_helper_sve_ldffhss_le_zsu,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu,
                  gen_helper_sve_ldffhsu_le_zsu,
                  gen_helper_sve_ldffss_le_zsu, } },
              { { gen_helper_sve_ldffbss_zss,
                  gen_helper_sve_ldffhss_le_zss,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss,
                  gen_helper_sve_ldffhsu_le_zss,
                  gen_helper_sve_ldffss_le_zss, } } } },

        { /* Big-endian */
            { { { gen_helper_sve_ldbss_zsu,
                  gen_helper_sve_ldhss_be_zsu,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu,
                  gen_helper_sve_ldhsu_be_zsu,
                  gen_helper_sve_ldss_be_zsu, } },
              { { gen_helper_sve_ldbss_zss,
                  gen_helper_sve_ldhss_be_zss,
                  NULL, },
                { gen_helper_sve_ldbsu_zss,
                  gen_helper_sve_ldhsu_be_zss,
                  gen_helper_sve_ldss_be_zss, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu,
                  gen_helper_sve_ldffhss_be_zsu,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu,
                  gen_helper_sve_ldffhsu_be_zsu,
                  gen_helper_sve_ldffss_be_zsu, } },
              { { gen_helper_sve_ldffbss_zss,
                  gen_helper_sve_ldffhss_be_zss,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss,
                  gen_helper_sve_ldffhsu_be_zss,
                  gen_helper_sve_ldffss_be_zss, } } } } },
    { /* MTE Active */
        { /* Little-endian */
            { { { gen_helper_sve_ldbss_zsu_mte,
                  gen_helper_sve_ldhss_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu_mte,
                  gen_helper_sve_ldhsu_le_zsu_mte,
                  gen_helper_sve_ldss_le_zsu_mte, } },
              { { gen_helper_sve_ldbss_zss_mte,
                  gen_helper_sve_ldhss_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zss_mte,
                  gen_helper_sve_ldhsu_le_zss_mte,
                  gen_helper_sve_ldss_le_zss_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu_mte,
                  gen_helper_sve_ldffhss_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu_mte,
                  gen_helper_sve_ldffhsu_le_zsu_mte,
                  gen_helper_sve_ldffss_le_zsu_mte, } },
              { { gen_helper_sve_ldffbss_zss_mte,
                  gen_helper_sve_ldffhss_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss_mte,
                  gen_helper_sve_ldffhsu_le_zss_mte,
                  gen_helper_sve_ldffss_le_zss_mte, } } } },

        { /* Big-endian */
            { { { gen_helper_sve_ldbss_zsu_mte,
                  gen_helper_sve_ldhss_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu_mte,
                  gen_helper_sve_ldhsu_be_zsu_mte,
                  gen_helper_sve_ldss_be_zsu_mte, } },
              { { gen_helper_sve_ldbss_zss_mte,
                  gen_helper_sve_ldhss_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zss_mte,
                  gen_helper_sve_ldhsu_be_zss_mte,
                  gen_helper_sve_ldss_be_zss_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu_mte,
                  gen_helper_sve_ldffhss_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu_mte,
                  gen_helper_sve_ldffhsu_be_zsu_mte,
                  gen_helper_sve_ldffss_be_zsu_mte, } },
              { { gen_helper_sve_ldffbss_zss_mte,
                  gen_helper_sve_ldffhss_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss_mte,
                  gen_helper_sve_ldffhsu_be_zss_mte,
                  gen_helper_sve_ldffss_be_zss_mte, } } } } },
};
5431
/*
 * Helpers for 64-bit element gather loads.
 * Indexed by [mte][be][ff][xs][u][msz] like gather_load_fn32.
 * Note that we overload xs=2 to indicate 64-bit offset (the zd helpers);
 * xs=0/1 are the 32-bit offset forms (zsu/zss per helper naming).
 * NULL entries are invalid combinations.
 */
static gen_helper_gvec_mem_scatter * const
gather_load_fn64[2][2][2][3][2][4] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { { { gen_helper_sve_ldbds_zsu,
                  gen_helper_sve_ldhds_le_zsu,
                  gen_helper_sve_ldsds_le_zsu,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu,
                  gen_helper_sve_ldhdu_le_zsu,
                  gen_helper_sve_ldsdu_le_zsu,
                  gen_helper_sve_lddd_le_zsu, } },
              { { gen_helper_sve_ldbds_zss,
                  gen_helper_sve_ldhds_le_zss,
                  gen_helper_sve_ldsds_le_zss,
                  NULL, },
                { gen_helper_sve_ldbdu_zss,
                  gen_helper_sve_ldhdu_le_zss,
                  gen_helper_sve_ldsdu_le_zss,
                  gen_helper_sve_lddd_le_zss, } },
              { { gen_helper_sve_ldbds_zd,
                  gen_helper_sve_ldhds_le_zd,
                  gen_helper_sve_ldsds_le_zd,
                  NULL, },
                { gen_helper_sve_ldbdu_zd,
                  gen_helper_sve_ldhdu_le_zd,
                  gen_helper_sve_ldsdu_le_zd,
                  gen_helper_sve_lddd_le_zd, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu,
                  gen_helper_sve_ldffhds_le_zsu,
                  gen_helper_sve_ldffsds_le_zsu,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu,
                  gen_helper_sve_ldffhdu_le_zsu,
                  gen_helper_sve_ldffsdu_le_zsu,
                  gen_helper_sve_ldffdd_le_zsu, } },
              { { gen_helper_sve_ldffbds_zss,
                  gen_helper_sve_ldffhds_le_zss,
                  gen_helper_sve_ldffsds_le_zss,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss,
                  gen_helper_sve_ldffhdu_le_zss,
                  gen_helper_sve_ldffsdu_le_zss,
                  gen_helper_sve_ldffdd_le_zss, } },
              { { gen_helper_sve_ldffbds_zd,
                  gen_helper_sve_ldffhds_le_zd,
                  gen_helper_sve_ldffsds_le_zd,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd,
                  gen_helper_sve_ldffhdu_le_zd,
                  gen_helper_sve_ldffsdu_le_zd,
                  gen_helper_sve_ldffdd_le_zd, } } } },
        { /* Big-endian */
            { { { gen_helper_sve_ldbds_zsu,
                  gen_helper_sve_ldhds_be_zsu,
                  gen_helper_sve_ldsds_be_zsu,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu,
                  gen_helper_sve_ldhdu_be_zsu,
                  gen_helper_sve_ldsdu_be_zsu,
                  gen_helper_sve_lddd_be_zsu, } },
              { { gen_helper_sve_ldbds_zss,
                  gen_helper_sve_ldhds_be_zss,
                  gen_helper_sve_ldsds_be_zss,
                  NULL, },
                { gen_helper_sve_ldbdu_zss,
                  gen_helper_sve_ldhdu_be_zss,
                  gen_helper_sve_ldsdu_be_zss,
                  gen_helper_sve_lddd_be_zss, } },
              { { gen_helper_sve_ldbds_zd,
                  gen_helper_sve_ldhds_be_zd,
                  gen_helper_sve_ldsds_be_zd,
                  NULL, },
                { gen_helper_sve_ldbdu_zd,
                  gen_helper_sve_ldhdu_be_zd,
                  gen_helper_sve_ldsdu_be_zd,
                  gen_helper_sve_lddd_be_zd, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu,
                  gen_helper_sve_ldffhds_be_zsu,
                  gen_helper_sve_ldffsds_be_zsu,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu,
                  gen_helper_sve_ldffhdu_be_zsu,
                  gen_helper_sve_ldffsdu_be_zsu,
                  gen_helper_sve_ldffdd_be_zsu, } },
              { { gen_helper_sve_ldffbds_zss,
                  gen_helper_sve_ldffhds_be_zss,
                  gen_helper_sve_ldffsds_be_zss,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss,
                  gen_helper_sve_ldffhdu_be_zss,
                  gen_helper_sve_ldffsdu_be_zss,
                  gen_helper_sve_ldffdd_be_zss, } },
              { { gen_helper_sve_ldffbds_zd,
                  gen_helper_sve_ldffhds_be_zd,
                  gen_helper_sve_ldffsds_be_zd,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd,
                  gen_helper_sve_ldffhdu_be_zd,
                  gen_helper_sve_ldffsdu_be_zd,
                  gen_helper_sve_ldffdd_be_zd, } } } } },
    { /* MTE Active */
        { /* Little-endian */
            { { { gen_helper_sve_ldbds_zsu_mte,
                  gen_helper_sve_ldhds_le_zsu_mte,
                  gen_helper_sve_ldsds_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu_mte,
                  gen_helper_sve_ldhdu_le_zsu_mte,
                  gen_helper_sve_ldsdu_le_zsu_mte,
                  gen_helper_sve_lddd_le_zsu_mte, } },
              { { gen_helper_sve_ldbds_zss_mte,
                  gen_helper_sve_ldhds_le_zss_mte,
                  gen_helper_sve_ldsds_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zss_mte,
                  gen_helper_sve_ldhdu_le_zss_mte,
                  gen_helper_sve_ldsdu_le_zss_mte,
                  gen_helper_sve_lddd_le_zss_mte, } },
              { { gen_helper_sve_ldbds_zd_mte,
                  gen_helper_sve_ldhds_le_zd_mte,
                  gen_helper_sve_ldsds_le_zd_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zd_mte,
                  gen_helper_sve_ldhdu_le_zd_mte,
                  gen_helper_sve_ldsdu_le_zd_mte,
                  gen_helper_sve_lddd_le_zd_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu_mte,
                  gen_helper_sve_ldffhds_le_zsu_mte,
                  gen_helper_sve_ldffsds_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu_mte,
                  gen_helper_sve_ldffhdu_le_zsu_mte,
                  gen_helper_sve_ldffsdu_le_zsu_mte,
                  gen_helper_sve_ldffdd_le_zsu_mte, } },
              { { gen_helper_sve_ldffbds_zss_mte,
                  gen_helper_sve_ldffhds_le_zss_mte,
                  gen_helper_sve_ldffsds_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss_mte,
                  gen_helper_sve_ldffhdu_le_zss_mte,
                  gen_helper_sve_ldffsdu_le_zss_mte,
                  gen_helper_sve_ldffdd_le_zss_mte, } },
              { { gen_helper_sve_ldffbds_zd_mte,
                  gen_helper_sve_ldffhds_le_zd_mte,
                  gen_helper_sve_ldffsds_le_zd_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd_mte,
                  gen_helper_sve_ldffhdu_le_zd_mte,
                  gen_helper_sve_ldffsdu_le_zd_mte,
                  gen_helper_sve_ldffdd_le_zd_mte, } } } },
        { /* Big-endian */
            { { { gen_helper_sve_ldbds_zsu_mte,
                  gen_helper_sve_ldhds_be_zsu_mte,
                  gen_helper_sve_ldsds_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu_mte,
                  gen_helper_sve_ldhdu_be_zsu_mte,
                  gen_helper_sve_ldsdu_be_zsu_mte,
                  gen_helper_sve_lddd_be_zsu_mte, } },
              { { gen_helper_sve_ldbds_zss_mte,
                  gen_helper_sve_ldhds_be_zss_mte,
                  gen_helper_sve_ldsds_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zss_mte,
                  gen_helper_sve_ldhdu_be_zss_mte,
                  gen_helper_sve_ldsdu_be_zss_mte,
                  gen_helper_sve_lddd_be_zss_mte, } },
              { { gen_helper_sve_ldbds_zd_mte,
                  gen_helper_sve_ldhds_be_zd_mte,
                  gen_helper_sve_ldsds_be_zd_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zd_mte,
                  gen_helper_sve_ldhdu_be_zd_mte,
                  gen_helper_sve_ldsdu_be_zd_mte,
                  gen_helper_sve_lddd_be_zd_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu_mte,
                  gen_helper_sve_ldffhds_be_zsu_mte,
                  gen_helper_sve_ldffsds_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu_mte,
                  gen_helper_sve_ldffhdu_be_zsu_mte,
                  gen_helper_sve_ldffsdu_be_zsu_mte,
                  gen_helper_sve_ldffdd_be_zsu_mte, } },
              { { gen_helper_sve_ldffbds_zss_mte,
                  gen_helper_sve_ldffhds_be_zss_mte,
                  gen_helper_sve_ldffsds_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss_mte,
                  gen_helper_sve_ldffhdu_be_zss_mte,
                  gen_helper_sve_ldffsdu_be_zss_mte,
                  gen_helper_sve_ldffdd_be_zss_mte, } },
              { { gen_helper_sve_ldffbds_zd_mte,
                  gen_helper_sve_ldffhds_be_zd_mte,
                  gen_helper_sve_ldffsds_be_zd_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd_mte,
                  gen_helper_sve_ldffhdu_be_zd_mte,
                  gen_helper_sve_ldffsdu_be_zd_mte,
                  gen_helper_sve_ldffdd_be_zd_mte, } } } } },
};
5642
3a7be554 5643static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
673e9fa6
RH
5644{
5645 gen_helper_gvec_mem_scatter *fn = NULL;
d28d12f0
RH
5646 bool be = s->be_data == MO_BE;
5647 bool mte = s->mte_active[0];
673e9fa6
RH
5648
5649 if (!sve_access_check(s)) {
5650 return true;
5651 }
5652
5653 switch (a->esz) {
5654 case MO_32:
d28d12f0 5655 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
673e9fa6
RH
5656 break;
5657 case MO_64:
d28d12f0 5658 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
673e9fa6
RH
5659 break;
5660 }
5661 assert(fn != NULL);
5662
5663 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
d28d12f0 5664 cpu_reg_sp(s, a->rn), a->msz, false, fn);
673e9fa6
RH
5665 return true;
5666}
5667
3a7be554 5668static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
673e9fa6
RH
5669{
5670 gen_helper_gvec_mem_scatter *fn = NULL;
d28d12f0
RH
5671 bool be = s->be_data == MO_BE;
5672 bool mte = s->mte_active[0];
673e9fa6
RH
5673 TCGv_i64 imm;
5674
5675 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
5676 return false;
5677 }
5678 if (!sve_access_check(s)) {
5679 return true;
5680 }
5681
5682 switch (a->esz) {
5683 case MO_32:
d28d12f0 5684 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
673e9fa6
RH
5685 break;
5686 case MO_64:
d28d12f0 5687 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
673e9fa6
RH
5688 break;
5689 }
5690 assert(fn != NULL);
5691
5692 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
5693 * by loading the immediate into the scalar parameter.
5694 */
5695 imm = tcg_const_i64(a->imm << a->msz);
d28d12f0 5696 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, false, fn);
673e9fa6
RH
5697 tcg_temp_free_i64(imm);
5698 return true;
5699}
5700
/*
 * Helpers for 32-bit element scatter stores.
 * Indexed by [mte][be][xs][msz]: mte: MTE checks active; be: big-endian;
 * xs: offset form (zsu vs zss per helper naming); msz: log2 memory size.
 */
static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { gen_helper_sve_stbs_zsu,
              gen_helper_sve_sths_le_zsu,
              gen_helper_sve_stss_le_zsu, },
            { gen_helper_sve_stbs_zss,
              gen_helper_sve_sths_le_zss,
              gen_helper_sve_stss_le_zss, } },
        { /* Big-endian */
            { gen_helper_sve_stbs_zsu,
              gen_helper_sve_sths_be_zsu,
              gen_helper_sve_stss_be_zsu, },
            { gen_helper_sve_stbs_zss,
              gen_helper_sve_sths_be_zss,
              gen_helper_sve_stss_be_zss, } } },
    { /* MTE Active */
        { /* Little-endian */
            { gen_helper_sve_stbs_zsu_mte,
              gen_helper_sve_sths_le_zsu_mte,
              gen_helper_sve_stss_le_zsu_mte, },
            { gen_helper_sve_stbs_zss_mte,
              gen_helper_sve_sths_le_zss_mte,
              gen_helper_sve_stss_le_zss_mte, } },
        { /* Big-endian */
            { gen_helper_sve_stbs_zsu_mte,
              gen_helper_sve_sths_be_zsu_mte,
              gen_helper_sve_stss_be_zsu_mte, },
            { gen_helper_sve_stbs_zss_mte,
              gen_helper_sve_sths_be_zss_mte,
              gen_helper_sve_stss_be_zss_mte, } } },
};
5734
/*
 * Helpers for 64-bit element scatter stores.
 * Indexed by [mte][be][xs][msz]; note that we overload xs=2 to indicate
 * 64-bit offset (the zd helpers).
 */
static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { gen_helper_sve_stbd_zsu,
              gen_helper_sve_sthd_le_zsu,
              gen_helper_sve_stsd_le_zsu,
              gen_helper_sve_stdd_le_zsu, },
            { gen_helper_sve_stbd_zss,
              gen_helper_sve_sthd_le_zss,
              gen_helper_sve_stsd_le_zss,
              gen_helper_sve_stdd_le_zss, },
            { gen_helper_sve_stbd_zd,
              gen_helper_sve_sthd_le_zd,
              gen_helper_sve_stsd_le_zd,
              gen_helper_sve_stdd_le_zd, } },
        { /* Big-endian */
            { gen_helper_sve_stbd_zsu,
              gen_helper_sve_sthd_be_zsu,
              gen_helper_sve_stsd_be_zsu,
              gen_helper_sve_stdd_be_zsu, },
            { gen_helper_sve_stbd_zss,
              gen_helper_sve_sthd_be_zss,
              gen_helper_sve_stsd_be_zss,
              gen_helper_sve_stdd_be_zss, },
            { gen_helper_sve_stbd_zd,
              gen_helper_sve_sthd_be_zd,
              gen_helper_sve_stsd_be_zd,
              gen_helper_sve_stdd_be_zd, } } },
    { /* MTE Active */  /* was mislabeled "MTE Inactive": entries are _mte */
        { /* Little-endian */
            { gen_helper_sve_stbd_zsu_mte,
              gen_helper_sve_sthd_le_zsu_mte,
              gen_helper_sve_stsd_le_zsu_mte,
              gen_helper_sve_stdd_le_zsu_mte, },
            { gen_helper_sve_stbd_zss_mte,
              gen_helper_sve_sthd_le_zss_mte,
              gen_helper_sve_stsd_le_zss_mte,
              gen_helper_sve_stdd_le_zss_mte, },
            { gen_helper_sve_stbd_zd_mte,
              gen_helper_sve_sthd_le_zd_mte,
              gen_helper_sve_stsd_le_zd_mte,
              gen_helper_sve_stdd_le_zd_mte, } },
        { /* Big-endian */
            { gen_helper_sve_stbd_zsu_mte,
              gen_helper_sve_sthd_be_zsu_mte,
              gen_helper_sve_stsd_be_zsu_mte,
              gen_helper_sve_stdd_be_zsu_mte, },
            { gen_helper_sve_stbd_zss_mte,
              gen_helper_sve_sthd_be_zss_mte,
              gen_helper_sve_stsd_be_zss_mte,
              gen_helper_sve_stdd_be_zss_mte, },
            { gen_helper_sve_stbd_zd_mte,
              gen_helper_sve_sthd_be_zd_mte,
              gen_helper_sve_stsd_be_zd_mte,
              gen_helper_sve_stdd_be_zd_mte, } } },
};
5792
3a7be554 5793static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
f6dbf62a 5794{
f6dbf62a 5795 gen_helper_gvec_mem_scatter *fn;
d28d12f0
RH
5796 bool be = s->be_data == MO_BE;
5797 bool mte = s->mte_active[0];
f6dbf62a
RH
5798
5799 if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
5800 return false;
5801 }
5802 if (!sve_access_check(s)) {
5803 return true;
5804 }
5805 switch (a->esz) {
5806 case MO_32:
d28d12f0 5807 fn = scatter_store_fn32[mte][be][a->xs][a->msz];
f6dbf62a
RH
5808 break;
5809 case MO_64:
d28d12f0 5810 fn = scatter_store_fn64[mte][be][a->xs][a->msz];
f6dbf62a
RH
5811 break;
5812 default:
5813 g_assert_not_reached();
5814 }
5815 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
d28d12f0 5816 cpu_reg_sp(s, a->rn), a->msz, true, fn);
f6dbf62a
RH
5817 return true;
5818}
dec6cf6b 5819
3a7be554 5820static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
408ecde9
RH
5821{
5822 gen_helper_gvec_mem_scatter *fn = NULL;
d28d12f0
RH
5823 bool be = s->be_data == MO_BE;
5824 bool mte = s->mte_active[0];
408ecde9
RH
5825 TCGv_i64 imm;
5826
5827 if (a->esz < a->msz) {
5828 return false;
5829 }
5830 if (!sve_access_check(s)) {
5831 return true;
5832 }
5833
5834 switch (a->esz) {
5835 case MO_32:
d28d12f0 5836 fn = scatter_store_fn32[mte][be][0][a->msz];
408ecde9
RH
5837 break;
5838 case MO_64:
d28d12f0 5839 fn = scatter_store_fn64[mte][be][2][a->msz];
408ecde9
RH
5840 break;
5841 }
5842 assert(fn != NULL);
5843
5844 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
5845 * by loading the immediate into the scalar parameter.
5846 */
5847 imm = tcg_const_i64(a->imm << a->msz);
d28d12f0 5848 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, true, fn);
408ecde9
RH
5849 tcg_temp_free_i64(imm);
5850 return true;
5851}
5852
dec6cf6b
RH
5853/*
5854 * Prefetches
5855 */
5856
3a7be554 5857static bool trans_PRF(DisasContext *s, arg_PRF *a)
dec6cf6b
RH
5858{
5859 /* Prefetch is a nop within QEMU. */
2f95a3b0 5860 (void)sve_access_check(s);
dec6cf6b
RH
5861 return true;
5862}
5863
3a7be554 5864static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
dec6cf6b
RH
5865{
5866 if (a->rm == 31) {
5867 return false;
5868 }
5869 /* Prefetch is a nop within QEMU. */
2f95a3b0 5870 (void)sve_access_check(s);
dec6cf6b
RH
5871 return true;
5872}
a2103582
RH
5873
5874/*
5875 * Move Prefix
5876 *
5877 * TODO: The implementation so far could handle predicated merging movprfx.
5878 * The helper functions as written take an extra source register to
5879 * use in the operation, but the result is only written when predication
5880 * succeeds. For unpredicated movprfx, we need to rearrange the helpers
5881 * to allow the final write back to the destination to be unconditional.
5882 * For predicated zeroing movprfx, we need to rearrange the helpers to
5883 * allow the final write back to zero inactives.
5884 *
5885 * In the meantime, just emit the moves.
5886 */
5887
3a7be554 5888static bool trans_MOVPRFX(DisasContext *s, arg_MOVPRFX *a)
a2103582
RH
5889{
5890 return do_mov_z(s, a->rd, a->rn);
5891}
5892
3a7be554 5893static bool trans_MOVPRFX_m(DisasContext *s, arg_rpr_esz *a)
a2103582
RH
5894{
5895 if (sve_access_check(s)) {
5896 do_sel_z(s, a->rd, a->rn, a->rd, a->pg, a->esz);
5897 }
5898 return true;
5899}
5900
3a7be554 5901static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a)
a2103582 5902{
60245996 5903 return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false);
a2103582 5904}
5dad1ba5
RH
5905
5906/*
5907 * SVE2 Integer Multiply - Unpredicated
5908 */
5909
/* SVE2 MUL (vectors, unpredicated), expanded inline via gvec. */
static bool trans_MUL_zzz(DisasContext *s, arg_rrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_fn_zzz(s, tcg_gen_gvec_mul, a->esz, a->rd, a->rn, a->rm);
    }
    return true;
}
5920
/*
 * Expand an SVE2 three-vector operation via an out-of-line helper.
 * Returns false (undefined encoding) if FN is NULL or SVE2 is absent.
 */
static bool do_sve2_zzz_ool(DisasContext *s, arg_rrr_esz *a,
                            gen_helper_gvec_3 *fn)
{
    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
    }
    return true;
}
5932
/* SVE2 SMULH (vectors): signed multiply returning the high half. */
static bool trans_SMULH_zzz(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
        gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
    };
    return do_sve2_zzz_ool(s, a, fns[a->esz]);
}
5941
/* SVE2 UMULH (vectors): unsigned multiply returning the high half. */
static bool trans_UMULH_zzz(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
        gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
    };
    return do_sve2_zzz_ool(s, a, fns[a->esz]);
}
5950
/* SVE2 PMUL (vectors): polynomial multiply, byte elements only. */
static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b);
}
d4b1e59d
RH
5955
5956/*
5957 * SVE2 Integer - Predicated
5958 */
5959
/*
 * Expand a predicated SVE2 two-source operation via an out-of-line
 * helper, gating on the SVE2 feature before the common SVE expansion.
 */
static bool do_sve2_zpzz_ool(DisasContext *s, arg_rprr_esz *a,
                             gen_helper_gvec_4 *fn)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzz_ool(s, a, fn);
}
5968
/* SVE2 SADALP: no byte form, so esz == 0 is an invalid encoding. */
static bool trans_SADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    static gen_helper_gvec_4 * const fns[3] = {
        gen_helper_sve2_sadalp_zpzz_h,
        gen_helper_sve2_sadalp_zpzz_s,
        gen_helper_sve2_sadalp_zpzz_d,
    };
    if (a->esz == 0) {
        return false;
    }
    return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
}
5981
/* SVE2 UADALP: no byte form, so esz == 0 is an invalid encoding. */
static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    static gen_helper_gvec_4 * const fns[3] = {
        gen_helper_sve2_uadalp_zpzz_h,
        gen_helper_sve2_uadalp_zpzz_s,
        gen_helper_sve2_uadalp_zpzz_d,
    };
    if (a->esz == 0) {
        return false;
    }
    return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
}
db366da8
RH
5994
5995/*
5996 * SVE2 integer unary operations (predicated)
5997 */
5998
/*
 * Expand a predicated SVE2 unary operation via an out-of-line helper,
 * gating on the SVE2 feature before the common SVE expansion.
 */
static bool do_sve2_zpz_ool(DisasContext *s, arg_rpr_esz *a,
                            gen_helper_gvec_3 *fn)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpz_ool(s, a, fn);
}
6007
/* SVE2 URECPE: 32-bit elements only. */
static bool trans_URECPE(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz != 2) {
        return false;
    }
    return do_sve2_zpz_ool(s, a, gen_helper_sve2_urecpe_s);
}
6015
/* SVE2 URSQRTE: 32-bit elements only. */
static bool trans_URSQRTE(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz != 2) {
        return false;
    }
    return do_sve2_zpz_ool(s, a, gen_helper_sve2_ursqrte_s);
}
6023
/* SVE2 SQABS: saturating absolute value, all element sizes. */
static bool trans_SQABS(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
        gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
    };
    return do_sve2_zpz_ool(s, a, fns[a->esz]);
}
6032
/* SVE2 SQNEG: saturating negate, all element sizes. */
static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
        gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
    };
    return do_sve2_zpz_ool(s, a, fns[a->esz]);
}
45d9503d
RH
6041
/*
 * Expand one predicated SVE2 two-source operation for all four element
 * sizes, dispatching on a->esz to the matching out-of-line helper.
 */
#define DO_SVE2_ZPZZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
{ \
    static gen_helper_gvec_4 * const fns[4] = { \
        gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h, \
        gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d, \
    }; \
    return do_sve2_zpzz_ool(s, a, fns[a->esz]); \
}

/* Saturating/rounding shifts. */
DO_SVE2_ZPZZ(SQSHL, sqshl)
DO_SVE2_ZPZZ(SQRSHL, sqrshl)
DO_SVE2_ZPZZ(SRSHL, srshl)

DO_SVE2_ZPZZ(UQSHL, uqshl)
DO_SVE2_ZPZZ(UQRSHL, uqrshl)
DO_SVE2_ZPZZ(URSHL, urshl)

/* Halving add/sub. */
DO_SVE2_ZPZZ(SHADD, shadd)
DO_SVE2_ZPZZ(SRHADD, srhadd)
DO_SVE2_ZPZZ(SHSUB, shsub)

DO_SVE2_ZPZZ(UHADD, uhadd)
DO_SVE2_ZPZZ(URHADD, urhadd)
DO_SVE2_ZPZZ(UHSUB, uhsub)

/* Pairwise arithmetic. */
DO_SVE2_ZPZZ(ADDP, addp)
DO_SVE2_ZPZZ(SMAXP, smaxp)
DO_SVE2_ZPZZ(UMAXP, umaxp)
DO_SVE2_ZPZZ(SMINP, sminp)
DO_SVE2_ZPZZ(UMINP, uminp)

/* Saturating add/sub. */
DO_SVE2_ZPZZ(SQADD_zpzz, sqadd)
DO_SVE2_ZPZZ(UQADD_zpzz, uqadd)
DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub)
DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub)
DO_SVE2_ZPZZ(SUQADD, suqadd)
DO_SVE2_ZPZZ(USQADD, usqadd)
0ce1dda8
RH
6080
6081/*
6082 * SVE2 Widening Integer Arithmetic
6083 */
6084
/*
 * Expand an SVE2 three-vector widening operation via an out-of-line
 * helper, passing DATA (typically top/bottom selectors) in the desc.
 * Returns false (undefined encoding) if FN is NULL or SVE2 is absent.
 */
static bool do_sve2_zzw_ool(DisasContext *s, arg_rrr_esz *a,
                            gen_helper_gvec_3 *fn, int data)
{
    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vsz, vsz, data, fn);
    }
    return true;
}
6100
/*
 * Expand an SVE2 widening three-vector operation; SEL1/SEL2 choose the
 * bottom (false) or top (true) half of each source, packed into the
 * helper's data argument.  No byte form (widening from byte produces
 * halfword results, handled by the _h helper).
 */
#define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \
static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
{ \
    static gen_helper_gvec_3 * const fns[4] = { \
        NULL, gen_helper_sve2_##name##_h, \
        gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
    }; \
    return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \
}

DO_SVE2_ZZZ_TB(SADDLB, saddl, false, false)
DO_SVE2_ZZZ_TB(SSUBLB, ssubl, false, false)
DO_SVE2_ZZZ_TB(SABDLB, sabdl, false, false)

DO_SVE2_ZZZ_TB(UADDLB, uaddl, false, false)
DO_SVE2_ZZZ_TB(USUBLB, usubl, false, false)
DO_SVE2_ZZZ_TB(UABDLB, uabdl, false, false)

DO_SVE2_ZZZ_TB(SADDLT, saddl, true, true)
DO_SVE2_ZZZ_TB(SSUBLT, ssubl, true, true)
DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true)

DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true)
DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true)
DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true)

/* Mixed bottom/top forms. */
DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true)
DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true)
DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false)

DO_SVE2_ZZZ_TB(SQDMULLB_zzz, sqdmull_zzz, false, false)
DO_SVE2_ZZZ_TB(SQDMULLT_zzz, sqdmull_zzz, true, true)

DO_SVE2_ZZZ_TB(SMULLB_zzz, smull_zzz, false, false)
DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true)

DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false)
DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true)
6139
2df3ca55
RH
/*
 * Common expansion for EORBT/EORTB: sel1 selects the half of the first
 * source; the second source always uses the opposite half (!sel1).
 */
static bool do_eor_tb(DisasContext *s, arg_rrr_esz *a, bool sel1)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
        gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
    };
    return do_sve2_zzw_ool(s, a, fns[a->esz], (!sel1 << 1) | sel1);
}
6148
/* SVE2 EORBT: bottom of rn with top of rm. */
static bool trans_EORBT(DisasContext *s, arg_rrr_esz *a)
{
    return do_eor_tb(s, a, false);
}
6153
/* SVE2 EORTB: top of rn with bottom of rm. */
static bool trans_EORTB(DisasContext *s, arg_rrr_esz *a)
{
    return do_eor_tb(s, a, true);
}
6158
e3a56131
RH
/*
 * Common expansion for PMULLB/PMULLT.  The 64->128-bit form (esz == 0
 * indexing gen_helper_gvec_pmull_q) additionally requires the
 * SVE2 AES/PMULL128 feature.
 */
static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
        NULL, gen_helper_sve2_pmull_d,
    };
    if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
        return false;
    }
    return do_sve2_zzw_ool(s, a, fns[a->esz], sel);
}
6170
/* SVE2 PMULLB: polynomial multiply long, bottom halves. */
static bool trans_PMULLB(DisasContext *s, arg_rrr_esz *a)
{
    return do_trans_pmull(s, a, false);
}
6175
/* SVE2 PMULLT: polynomial multiply long, top halves. */
static bool trans_PMULLT(DisasContext *s, arg_rrr_esz *a)
{
    return do_trans_pmull(s, a, true);
}
6180
81fccf09
RH
/*
 * Expand an SVE2 wide-plus-narrow operation; only the second source is
 * widened, with SEL2 choosing its bottom (false) or top (true) half.
 */
#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
{ \
    static gen_helper_gvec_3 * const fns[4] = { \
        NULL, gen_helper_sve2_##name##_h, \
        gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
    }; \
    return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \
}

DO_SVE2_ZZZ_WTB(SADDWB, saddw, false)
DO_SVE2_ZZZ_WTB(SADDWT, saddw, true)
DO_SVE2_ZZZ_WTB(SSUBWB, ssubw, false)
DO_SVE2_ZZZ_WTB(SSUBWT, ssubw, true)

DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false)
DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true)
DO_SVE2_ZZZ_WTB(USUBWB, usubw, false)
DO_SVE2_ZZZ_WTB(USUBWT, usubw, true)
4269fef1
RH
6200
/*
 * SSHLLB/SSHLLT vector expansion: sign-extend the bottom (top == 0) or
 * top (top == 1) half of each element and shift left by SHL.
 * IMM packs (shl << 1) | top; halfbits is the narrow element width.
 */
static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
{
    int top = imm & 1;
    int shl = imm >> 1;
    int halfbits = 4 << vece;

    if (top) {
        if (shl == halfbits) {
            /* Shifting the top half up by halfbits just masks it in place. */
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
            tcg_temp_free_vec(t);
        } else {
            /* Arithmetic shift down sign-extends the top half. */
            tcg_gen_sari_vec(vece, d, n, halfbits);
            tcg_gen_shli_vec(vece, d, d, shl);
        }
    } else {
        /* Move the bottom half to the top, then sign-extend downward. */
        tcg_gen_shli_vec(vece, d, n, halfbits);
        tcg_gen_sari_vec(vece, d, d, halfbits - shl);
    }
}
6222
/*
 * USHLLB/USHLLT scalar-i64 expansion: zero-extend the selected half of
 * each element and shift left by SHL.  IMM packs (shl << 1) | top.
 * The shift positions the half, then the mask clears the stray bits.
 */
static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
{
    int halfbits = 4 << vece;
    int top = imm & 1;
    int shl = (imm >> 1);
    int shift;
    uint64_t mask;

    mask = MAKE_64BIT_MASK(0, halfbits);
    mask <<= shl;
    mask = dup_const(vece, mask);

    /* top half starts halfbits up; a negative net shift moves it down. */
    shift = shl - top * halfbits;
    if (shift < 0) {
        tcg_gen_shri_i64(d, n, -shift);
    } else {
        tcg_gen_shli_i64(d, n, shift);
    }
    tcg_gen_andi_i64(d, d, mask);
}
6243
6244static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6245{
6246 gen_ushll_i64(MO_16, d, n, imm);
6247}
6248
6249static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6250{
6251 gen_ushll_i64(MO_32, d, n, imm);
6252}
6253
6254static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6255{
6256 gen_ushll_i64(MO_64, d, n, imm);
6257}
6258
6259static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6260{
6261 int halfbits = 4 << vece;
6262 int top = imm & 1;
6263 int shl = imm >> 1;
6264
6265 if (top) {
6266 if (shl == halfbits) {
6267 TCGv_vec t = tcg_temp_new_vec_matching(d);
6268 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6269 tcg_gen_and_vec(vece, d, n, t);
6270 tcg_temp_free_vec(t);
6271 } else {
6272 tcg_gen_shri_vec(vece, d, n, halfbits);
6273 tcg_gen_shli_vec(vece, d, d, shl);
6274 }
6275 } else {
6276 if (shl == 0) {
6277 TCGv_vec t = tcg_temp_new_vec_matching(d);
6278 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6279 tcg_gen_and_vec(vece, d, n, t);
6280 tcg_temp_free_vec(t);
6281 } else {
6282 tcg_gen_shli_vec(vece, d, n, halfbits);
6283 tcg_gen_shri_vec(vece, d, d, halfbits - shl);
6284 }
6285 }
6286}
6287
6288static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
6289 bool sel, bool uns)
6290{
6291 static const TCGOpcode sshll_list[] = {
6292 INDEX_op_shli_vec, INDEX_op_sari_vec, 0
6293 };
6294 static const TCGOpcode ushll_list[] = {
6295 INDEX_op_shli_vec, INDEX_op_shri_vec, 0
6296 };
6297 static const GVecGen2i ops[2][3] = {
6298 { { .fniv = gen_sshll_vec,
6299 .opt_opc = sshll_list,
6300 .fno = gen_helper_sve2_sshll_h,
6301 .vece = MO_16 },
6302 { .fniv = gen_sshll_vec,
6303 .opt_opc = sshll_list,
6304 .fno = gen_helper_sve2_sshll_s,
6305 .vece = MO_32 },
6306 { .fniv = gen_sshll_vec,
6307 .opt_opc = sshll_list,
6308 .fno = gen_helper_sve2_sshll_d,
6309 .vece = MO_64 } },
6310 { { .fni8 = gen_ushll16_i64,
6311 .fniv = gen_ushll_vec,
6312 .opt_opc = ushll_list,
6313 .fno = gen_helper_sve2_ushll_h,
6314 .vece = MO_16 },
6315 { .fni8 = gen_ushll32_i64,
6316 .fniv = gen_ushll_vec,
6317 .opt_opc = ushll_list,
6318 .fno = gen_helper_sve2_ushll_s,
6319 .vece = MO_32 },
6320 { .fni8 = gen_ushll64_i64,
6321 .fniv = gen_ushll_vec,
6322 .opt_opc = ushll_list,
6323 .fno = gen_helper_sve2_ushll_d,
6324 .vece = MO_64 } },
6325 };
6326
6327 if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
6328 return false;
6329 }
6330 if (sve_access_check(s)) {
6331 unsigned vsz = vec_full_reg_size(s);
6332 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6333 vec_full_reg_offset(s, a->rn),
6334 vsz, vsz, (a->imm << 1) | sel,
6335 &ops[uns][a->esz]);
6336 }
6337 return true;
6338}
6339
6340static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
6341{
6342 return do_sve2_shll_tb(s, a, false, false);
6343}
6344
6345static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
6346{
6347 return do_sve2_shll_tb(s, a, true, false);
6348}
6349
6350static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
6351{
6352 return do_sve2_shll_tb(s, a, false, true);
6353}
6354
6355static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
6356{
6357 return do_sve2_shll_tb(s, a, true, true);
6358}
cb9c33b8
RH
6359
6360static bool trans_BEXT(DisasContext *s, arg_rrr_esz *a)
6361{
6362 static gen_helper_gvec_3 * const fns[4] = {
6363 gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
6364 gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
6365 };
6366 if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
6367 return false;
6368 }
6369 return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
6370}
6371
6372static bool trans_BDEP(DisasContext *s, arg_rrr_esz *a)
6373{
6374 static gen_helper_gvec_3 * const fns[4] = {
6375 gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
6376 gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
6377 };
6378 if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
6379 return false;
6380 }
6381 return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
6382}
6383
6384static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a)
6385{
6386 static gen_helper_gvec_3 * const fns[4] = {
6387 gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
6388 gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
6389 };
6390 if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
6391 return false;
6392 }
6393 return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
6394}
ed4a6387
RH
6395
6396static bool do_cadd(DisasContext *s, arg_rrr_esz *a, bool sq, bool rot)
6397{
6398 static gen_helper_gvec_3 * const fns[2][4] = {
6399 { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
6400 gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d },
6401 { gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
6402 gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d },
6403 };
6404 return do_sve2_zzw_ool(s, a, fns[sq][a->esz], rot);
6405}
6406
6407static bool trans_CADD_rot90(DisasContext *s, arg_rrr_esz *a)
6408{
6409 return do_cadd(s, a, false, false);
6410}
6411
6412static bool trans_CADD_rot270(DisasContext *s, arg_rrr_esz *a)
6413{
6414 return do_cadd(s, a, false, true);
6415}
6416
6417static bool trans_SQCADD_rot90(DisasContext *s, arg_rrr_esz *a)
6418{
6419 return do_cadd(s, a, true, false);
6420}
6421
6422static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a)
6423{
6424 return do_cadd(s, a, true, true);
6425}
38650638
RH
6426
6427static bool do_sve2_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
6428 gen_helper_gvec_4 *fn, int data)
6429{
6430 if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
6431 return false;
6432 }
6433 if (sve_access_check(s)) {
6434 gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
6435 }
6436 return true;
6437}
6438
6439static bool do_abal(DisasContext *s, arg_rrrr_esz *a, bool uns, bool sel)
6440{
6441 static gen_helper_gvec_4 * const fns[2][4] = {
6442 { NULL, gen_helper_sve2_sabal_h,
6443 gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d },
6444 { NULL, gen_helper_sve2_uabal_h,
6445 gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d },
6446 };
6447 return do_sve2_zzzz_ool(s, a, fns[uns][a->esz], sel);
6448}
6449
6450static bool trans_SABALB(DisasContext *s, arg_rrrr_esz *a)
6451{
6452 return do_abal(s, a, false, false);
6453}
6454
6455static bool trans_SABALT(DisasContext *s, arg_rrrr_esz *a)
6456{
6457 return do_abal(s, a, false, true);
6458}
6459
6460static bool trans_UABALB(DisasContext *s, arg_rrrr_esz *a)
6461{
6462 return do_abal(s, a, true, false);
6463}
6464
6465static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a)
6466{
6467 return do_abal(s, a, true, true);
6468}
b8295dfb
RH
6469
6470static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
6471{
6472 static gen_helper_gvec_4 * const fns[2] = {
6473 gen_helper_sve2_adcl_s,
6474 gen_helper_sve2_adcl_d,
6475 };
6476 /*
6477 * Note that in this case the ESZ field encodes both size and sign.
6478 * Split out 'subtract' into bit 1 of the data field for the helper.
6479 */
6480 return do_sve2_zzzz_ool(s, a, fns[a->esz & 1], (a->esz & 2) | sel);
6481}
6482
6483static bool trans_ADCLB(DisasContext *s, arg_rrrr_esz *a)
6484{
6485 return do_adcl(s, a, false);
6486}
6487
6488static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a)
6489{
6490 return do_adcl(s, a, true);
6491}
a7e3a90e
RH
6492
6493static bool do_sve2_fn2i(DisasContext *s, arg_rri_esz *a, GVecGen2iFn *fn)
6494{
6495 if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
6496 return false;
6497 }
6498 if (sve_access_check(s)) {
6499 unsigned vsz = vec_full_reg_size(s);
6500 unsigned rd_ofs = vec_full_reg_offset(s, a->rd);
6501 unsigned rn_ofs = vec_full_reg_offset(s, a->rn);
6502 fn(a->esz, rd_ofs, rn_ofs, a->imm, vsz, vsz);
6503 }
6504 return true;
6505}
6506
6507static bool trans_SSRA(DisasContext *s, arg_rri_esz *a)
6508{
6509 return do_sve2_fn2i(s, a, gen_gvec_ssra);
6510}
6511
6512static bool trans_USRA(DisasContext *s, arg_rri_esz *a)
6513{
6514 return do_sve2_fn2i(s, a, gen_gvec_usra);
6515}
6516
6517static bool trans_SRSRA(DisasContext *s, arg_rri_esz *a)
6518{
6519 return do_sve2_fn2i(s, a, gen_gvec_srsra);
6520}
6521
6522static bool trans_URSRA(DisasContext *s, arg_rri_esz *a)
6523{
6524 return do_sve2_fn2i(s, a, gen_gvec_ursra);
6525}
fc12b46a
RH
6526
6527static bool trans_SRI(DisasContext *s, arg_rri_esz *a)
6528{
6529 return do_sve2_fn2i(s, a, gen_gvec_sri);
6530}
6531
6532static bool trans_SLI(DisasContext *s, arg_rri_esz *a)
6533{
6534 return do_sve2_fn2i(s, a, gen_gvec_sli);
6535}
289a1797
RH
6536
6537static bool do_sve2_fn_zzz(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *fn)
6538{
6539 if (!dc_isar_feature(aa64_sve2, s)) {
6540 return false;
6541 }
6542 if (sve_access_check(s)) {
6543 gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
6544 }
6545 return true;
6546}
6547
6548static bool trans_SABA(DisasContext *s, arg_rrr_esz *a)
6549{
6550 return do_sve2_fn_zzz(s, a, gen_gvec_saba);
6551}
6552
6553static bool trans_UABA(DisasContext *s, arg_rrr_esz *a)
6554{
6555 return do_sve2_fn_zzz(s, a, gen_gvec_uaba);
6556}
5ff2838d
RH
6557
6558static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
6559 const GVecGen2 ops[3])
6560{
6561 if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
6562 !dc_isar_feature(aa64_sve2, s)) {
6563 return false;
6564 }
6565 if (sve_access_check(s)) {
6566 unsigned vsz = vec_full_reg_size(s);
6567 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
6568 vec_full_reg_offset(s, a->rn),
6569 vsz, vsz, &ops[a->esz]);
6570 }
6571 return true;
6572}
6573
6574static const TCGOpcode sqxtn_list[] = {
6575 INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
6576};
6577
6578static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6579{
6580 TCGv_vec t = tcg_temp_new_vec_matching(d);
6581 int halfbits = 4 << vece;
6582 int64_t mask = (1ull << halfbits) - 1;
6583 int64_t min = -1ull << (halfbits - 1);
6584 int64_t max = -min - 1;
6585
6586 tcg_gen_dupi_vec(vece, t, min);
6587 tcg_gen_smax_vec(vece, d, n, t);
6588 tcg_gen_dupi_vec(vece, t, max);
6589 tcg_gen_smin_vec(vece, d, d, t);
6590 tcg_gen_dupi_vec(vece, t, mask);
6591 tcg_gen_and_vec(vece, d, d, t);
6592 tcg_temp_free_vec(t);
6593}
6594
6595static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
6596{
6597 static const GVecGen2 ops[3] = {
6598 { .fniv = gen_sqxtnb_vec,
6599 .opt_opc = sqxtn_list,
6600 .fno = gen_helper_sve2_sqxtnb_h,
6601 .vece = MO_16 },
6602 { .fniv = gen_sqxtnb_vec,
6603 .opt_opc = sqxtn_list,
6604 .fno = gen_helper_sve2_sqxtnb_s,
6605 .vece = MO_32 },
6606 { .fniv = gen_sqxtnb_vec,
6607 .opt_opc = sqxtn_list,
6608 .fno = gen_helper_sve2_sqxtnb_d,
6609 .vece = MO_64 },
6610 };
6611 return do_sve2_narrow_extract(s, a, ops);
6612}
6613
6614static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6615{
6616 TCGv_vec t = tcg_temp_new_vec_matching(d);
6617 int halfbits = 4 << vece;
6618 int64_t mask = (1ull << halfbits) - 1;
6619 int64_t min = -1ull << (halfbits - 1);
6620 int64_t max = -min - 1;
6621
6622 tcg_gen_dupi_vec(vece, t, min);
6623 tcg_gen_smax_vec(vece, n, n, t);
6624 tcg_gen_dupi_vec(vece, t, max);
6625 tcg_gen_smin_vec(vece, n, n, t);
6626 tcg_gen_shli_vec(vece, n, n, halfbits);
6627 tcg_gen_dupi_vec(vece, t, mask);
6628 tcg_gen_bitsel_vec(vece, d, t, d, n);
6629 tcg_temp_free_vec(t);
6630}
6631
6632static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
6633{
6634 static const GVecGen2 ops[3] = {
6635 { .fniv = gen_sqxtnt_vec,
6636 .opt_opc = sqxtn_list,
6637 .load_dest = true,
6638 .fno = gen_helper_sve2_sqxtnt_h,
6639 .vece = MO_16 },
6640 { .fniv = gen_sqxtnt_vec,
6641 .opt_opc = sqxtn_list,
6642 .load_dest = true,
6643 .fno = gen_helper_sve2_sqxtnt_s,
6644 .vece = MO_32 },
6645 { .fniv = gen_sqxtnt_vec,
6646 .opt_opc = sqxtn_list,
6647 .load_dest = true,
6648 .fno = gen_helper_sve2_sqxtnt_d,
6649 .vece = MO_64 },
6650 };
6651 return do_sve2_narrow_extract(s, a, ops);
6652}
6653
6654static const TCGOpcode uqxtn_list[] = {
6655 INDEX_op_shli_vec, INDEX_op_umin_vec, 0
6656};
6657
6658static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6659{
6660 TCGv_vec t = tcg_temp_new_vec_matching(d);
6661 int halfbits = 4 << vece;
6662 int64_t max = (1ull << halfbits) - 1;
6663
6664 tcg_gen_dupi_vec(vece, t, max);
6665 tcg_gen_umin_vec(vece, d, n, t);
6666 tcg_temp_free_vec(t);
6667}
6668
6669static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
6670{
6671 static const GVecGen2 ops[3] = {
6672 { .fniv = gen_uqxtnb_vec,
6673 .opt_opc = uqxtn_list,
6674 .fno = gen_helper_sve2_uqxtnb_h,
6675 .vece = MO_16 },
6676 { .fniv = gen_uqxtnb_vec,
6677 .opt_opc = uqxtn_list,
6678 .fno = gen_helper_sve2_uqxtnb_s,
6679 .vece = MO_32 },
6680 { .fniv = gen_uqxtnb_vec,
6681 .opt_opc = uqxtn_list,
6682 .fno = gen_helper_sve2_uqxtnb_d,
6683 .vece = MO_64 },
6684 };
6685 return do_sve2_narrow_extract(s, a, ops);
6686}
6687
6688static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6689{
6690 TCGv_vec t = tcg_temp_new_vec_matching(d);
6691 int halfbits = 4 << vece;
6692 int64_t max = (1ull << halfbits) - 1;
6693
6694 tcg_gen_dupi_vec(vece, t, max);
6695 tcg_gen_umin_vec(vece, n, n, t);
6696 tcg_gen_shli_vec(vece, n, n, halfbits);
6697 tcg_gen_bitsel_vec(vece, d, t, d, n);
6698 tcg_temp_free_vec(t);
6699}
6700
6701static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
6702{
6703 static const GVecGen2 ops[3] = {
6704 { .fniv = gen_uqxtnt_vec,
6705 .opt_opc = uqxtn_list,
6706 .load_dest = true,
6707 .fno = gen_helper_sve2_uqxtnt_h,
6708 .vece = MO_16 },
6709 { .fniv = gen_uqxtnt_vec,
6710 .opt_opc = uqxtn_list,
6711 .load_dest = true,
6712 .fno = gen_helper_sve2_uqxtnt_s,
6713 .vece = MO_32 },
6714 { .fniv = gen_uqxtnt_vec,
6715 .opt_opc = uqxtn_list,
6716 .load_dest = true,
6717 .fno = gen_helper_sve2_uqxtnt_d,
6718 .vece = MO_64 },
6719 };
6720 return do_sve2_narrow_extract(s, a, ops);
6721}
6722
6723static const TCGOpcode sqxtun_list[] = {
6724 INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
6725};
6726
6727static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6728{
6729 TCGv_vec t = tcg_temp_new_vec_matching(d);
6730 int halfbits = 4 << vece;
6731 int64_t max = (1ull << halfbits) - 1;
6732
6733 tcg_gen_dupi_vec(vece, t, 0);
6734 tcg_gen_smax_vec(vece, d, n, t);
6735 tcg_gen_dupi_vec(vece, t, max);
6736 tcg_gen_umin_vec(vece, d, d, t);
6737 tcg_temp_free_vec(t);
6738}
6739
6740static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
6741{
6742 static const GVecGen2 ops[3] = {
6743 { .fniv = gen_sqxtunb_vec,
6744 .opt_opc = sqxtun_list,
6745 .fno = gen_helper_sve2_sqxtunb_h,
6746 .vece = MO_16 },
6747 { .fniv = gen_sqxtunb_vec,
6748 .opt_opc = sqxtun_list,
6749 .fno = gen_helper_sve2_sqxtunb_s,
6750 .vece = MO_32 },
6751 { .fniv = gen_sqxtunb_vec,
6752 .opt_opc = sqxtun_list,
6753 .fno = gen_helper_sve2_sqxtunb_d,
6754 .vece = MO_64 },
6755 };
6756 return do_sve2_narrow_extract(s, a, ops);
6757}
6758
6759static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6760{
6761 TCGv_vec t = tcg_temp_new_vec_matching(d);
6762 int halfbits = 4 << vece;
6763 int64_t max = (1ull << halfbits) - 1;
6764
6765 tcg_gen_dupi_vec(vece, t, 0);
6766 tcg_gen_smax_vec(vece, n, n, t);
6767 tcg_gen_dupi_vec(vece, t, max);
6768 tcg_gen_umin_vec(vece, n, n, t);
6769 tcg_gen_shli_vec(vece, n, n, halfbits);
6770 tcg_gen_bitsel_vec(vece, d, t, d, n);
6771 tcg_temp_free_vec(t);
6772}
6773
6774static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
6775{
6776 static const GVecGen2 ops[3] = {
6777 { .fniv = gen_sqxtunt_vec,
6778 .opt_opc = sqxtun_list,
6779 .load_dest = true,
6780 .fno = gen_helper_sve2_sqxtunt_h,
6781 .vece = MO_16 },
6782 { .fniv = gen_sqxtunt_vec,
6783 .opt_opc = sqxtun_list,
6784 .load_dest = true,
6785 .fno = gen_helper_sve2_sqxtunt_s,
6786 .vece = MO_32 },
6787 { .fniv = gen_sqxtunt_vec,
6788 .opt_opc = sqxtun_list,
6789 .load_dest = true,
6790 .fno = gen_helper_sve2_sqxtunt_d,
6791 .vece = MO_64 },
6792 };
6793 return do_sve2_narrow_extract(s, a, ops);
46d111b2
RH
6794}
6795
6796static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
6797 const GVecGen2i ops[3])
6798{
6799 if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
6800 return false;
6801 }
6802 assert(a->imm > 0 && a->imm <= (8 << a->esz));
6803 if (sve_access_check(s)) {
6804 unsigned vsz = vec_full_reg_size(s);
6805 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6806 vec_full_reg_offset(s, a->rn),
6807 vsz, vsz, a->imm, &ops[a->esz]);
6808 }
6809 return true;
6810}
6811
6812static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6813{
6814 int halfbits = 4 << vece;
6815 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6816
6817 tcg_gen_shri_i64(d, n, shr);
6818 tcg_gen_andi_i64(d, d, mask);
6819}
6820
6821static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6822{
6823 gen_shrnb_i64(MO_16, d, n, shr);
6824}
6825
6826static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6827{
6828 gen_shrnb_i64(MO_32, d, n, shr);
6829}
6830
6831static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6832{
6833 gen_shrnb_i64(MO_64, d, n, shr);
6834}
6835
6836static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6837{
6838 TCGv_vec t = tcg_temp_new_vec_matching(d);
6839 int halfbits = 4 << vece;
6840 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6841
6842 tcg_gen_shri_vec(vece, n, n, shr);
6843 tcg_gen_dupi_vec(vece, t, mask);
6844 tcg_gen_and_vec(vece, d, n, t);
6845 tcg_temp_free_vec(t);
6846}
6847
6848static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
6849{
6850 static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
6851 static const GVecGen2i ops[3] = {
6852 { .fni8 = gen_shrnb16_i64,
6853 .fniv = gen_shrnb_vec,
6854 .opt_opc = vec_list,
6855 .fno = gen_helper_sve2_shrnb_h,
6856 .vece = MO_16 },
6857 { .fni8 = gen_shrnb32_i64,
6858 .fniv = gen_shrnb_vec,
6859 .opt_opc = vec_list,
6860 .fno = gen_helper_sve2_shrnb_s,
6861 .vece = MO_32 },
6862 { .fni8 = gen_shrnb64_i64,
6863 .fniv = gen_shrnb_vec,
6864 .opt_opc = vec_list,
6865 .fno = gen_helper_sve2_shrnb_d,
6866 .vece = MO_64 },
6867 };
6868 return do_sve2_shr_narrow(s, a, ops);
6869}
6870
6871static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6872{
6873 int halfbits = 4 << vece;
6874 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6875
6876 tcg_gen_shli_i64(n, n, halfbits - shr);
6877 tcg_gen_andi_i64(n, n, ~mask);
6878 tcg_gen_andi_i64(d, d, mask);
6879 tcg_gen_or_i64(d, d, n);
6880}
6881
6882static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6883{
6884 gen_shrnt_i64(MO_16, d, n, shr);
6885}
6886
6887static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6888{
6889 gen_shrnt_i64(MO_32, d, n, shr);
6890}
6891
6892static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6893{
6894 tcg_gen_shri_i64(n, n, shr);
6895 tcg_gen_deposit_i64(d, d, n, 32, 32);
6896}
6897
6898static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6899{
6900 TCGv_vec t = tcg_temp_new_vec_matching(d);
6901 int halfbits = 4 << vece;
6902 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6903
6904 tcg_gen_shli_vec(vece, n, n, halfbits - shr);
6905 tcg_gen_dupi_vec(vece, t, mask);
6906 tcg_gen_bitsel_vec(vece, d, t, d, n);
6907 tcg_temp_free_vec(t);
6908}
6909
6910static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
6911{
6912 static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
6913 static const GVecGen2i ops[3] = {
6914 { .fni8 = gen_shrnt16_i64,
6915 .fniv = gen_shrnt_vec,
6916 .opt_opc = vec_list,
6917 .load_dest = true,
6918 .fno = gen_helper_sve2_shrnt_h,
6919 .vece = MO_16 },
6920 { .fni8 = gen_shrnt32_i64,
6921 .fniv = gen_shrnt_vec,
6922 .opt_opc = vec_list,
6923 .load_dest = true,
6924 .fno = gen_helper_sve2_shrnt_s,
6925 .vece = MO_32 },
6926 { .fni8 = gen_shrnt64_i64,
6927 .fniv = gen_shrnt_vec,
6928 .opt_opc = vec_list,
6929 .load_dest = true,
6930 .fno = gen_helper_sve2_shrnt_d,
6931 .vece = MO_64 },
6932 };
6933 return do_sve2_shr_narrow(s, a, ops);
6934}
6935
6936static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
6937{
6938 static const GVecGen2i ops[3] = {
6939 { .fno = gen_helper_sve2_rshrnb_h },
6940 { .fno = gen_helper_sve2_rshrnb_s },
6941 { .fno = gen_helper_sve2_rshrnb_d },
6942 };
6943 return do_sve2_shr_narrow(s, a, ops);
6944}
6945
6946static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
6947{
6948 static const GVecGen2i ops[3] = {
6949 { .fno = gen_helper_sve2_rshrnt_h },
6950 { .fno = gen_helper_sve2_rshrnt_s },
6951 { .fno = gen_helper_sve2_rshrnt_d },
6952 };
6953 return do_sve2_shr_narrow(s, a, ops);
81fd3e6e
RH
6954}
6955
6956static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
6957 TCGv_vec n, int64_t shr)
6958{
6959 TCGv_vec t = tcg_temp_new_vec_matching(d);
6960 int halfbits = 4 << vece;
6961
6962 tcg_gen_sari_vec(vece, n, n, shr);
6963 tcg_gen_dupi_vec(vece, t, 0);
6964 tcg_gen_smax_vec(vece, n, n, t);
6965 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6966 tcg_gen_umin_vec(vece, d, n, t);
6967 tcg_temp_free_vec(t);
6968}
6969
6970static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
6971{
6972 static const TCGOpcode vec_list[] = {
6973 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
6974 };
6975 static const GVecGen2i ops[3] = {
6976 { .fniv = gen_sqshrunb_vec,
6977 .opt_opc = vec_list,
6978 .fno = gen_helper_sve2_sqshrunb_h,
6979 .vece = MO_16 },
6980 { .fniv = gen_sqshrunb_vec,
6981 .opt_opc = vec_list,
6982 .fno = gen_helper_sve2_sqshrunb_s,
6983 .vece = MO_32 },
6984 { .fniv = gen_sqshrunb_vec,
6985 .opt_opc = vec_list,
6986 .fno = gen_helper_sve2_sqshrunb_d,
6987 .vece = MO_64 },
6988 };
6989 return do_sve2_shr_narrow(s, a, ops);
6990}
6991
6992static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
6993 TCGv_vec n, int64_t shr)
6994{
6995 TCGv_vec t = tcg_temp_new_vec_matching(d);
6996 int halfbits = 4 << vece;
6997
6998 tcg_gen_sari_vec(vece, n, n, shr);
6999 tcg_gen_dupi_vec(vece, t, 0);
7000 tcg_gen_smax_vec(vece, n, n, t);
7001 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7002 tcg_gen_umin_vec(vece, n, n, t);
7003 tcg_gen_shli_vec(vece, n, n, halfbits);
7004 tcg_gen_bitsel_vec(vece, d, t, d, n);
7005 tcg_temp_free_vec(t);
7006}
7007
7008static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
7009{
7010 static const TCGOpcode vec_list[] = {
7011 INDEX_op_shli_vec, INDEX_op_sari_vec,
7012 INDEX_op_smax_vec, INDEX_op_umin_vec, 0
7013 };
7014 static const GVecGen2i ops[3] = {
7015 { .fniv = gen_sqshrunt_vec,
7016 .opt_opc = vec_list,
7017 .load_dest = true,
7018 .fno = gen_helper_sve2_sqshrunt_h,
7019 .vece = MO_16 },
7020 { .fniv = gen_sqshrunt_vec,
7021 .opt_opc = vec_list,
7022 .load_dest = true,
7023 .fno = gen_helper_sve2_sqshrunt_s,
7024 .vece = MO_32 },
7025 { .fniv = gen_sqshrunt_vec,
7026 .opt_opc = vec_list,
7027 .load_dest = true,
7028 .fno = gen_helper_sve2_sqshrunt_d,
7029 .vece = MO_64 },
7030 };
7031 return do_sve2_shr_narrow(s, a, ops);
7032}
7033
7034static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
7035{
7036 static const GVecGen2i ops[3] = {
7037 { .fno = gen_helper_sve2_sqrshrunb_h },
7038 { .fno = gen_helper_sve2_sqrshrunb_s },
7039 { .fno = gen_helper_sve2_sqrshrunb_d },
7040 };
7041 return do_sve2_shr_narrow(s, a, ops);
7042}
7043
7044static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
7045{
7046 static const GVecGen2i ops[3] = {
7047 { .fno = gen_helper_sve2_sqrshrunt_h },
7048 { .fno = gen_helper_sve2_sqrshrunt_s },
7049 { .fno = gen_helper_sve2_sqrshrunt_d },
7050 };
7051 return do_sve2_shr_narrow(s, a, ops);
c13418da
RH
7052}
7053
743bb147
RH
7054static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
7055 TCGv_vec n, int64_t shr)
7056{
7057 TCGv_vec t = tcg_temp_new_vec_matching(d);
7058 int halfbits = 4 << vece;
7059 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
7060 int64_t min = -max - 1;
7061
7062 tcg_gen_sari_vec(vece, n, n, shr);
7063 tcg_gen_dupi_vec(vece, t, min);
7064 tcg_gen_smax_vec(vece, n, n, t);
7065 tcg_gen_dupi_vec(vece, t, max);
7066 tcg_gen_smin_vec(vece, n, n, t);
7067 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7068 tcg_gen_and_vec(vece, d, n, t);
7069 tcg_temp_free_vec(t);
7070}
7071
7072static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
7073{
7074 static const TCGOpcode vec_list[] = {
7075 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
7076 };
7077 static const GVecGen2i ops[3] = {
7078 { .fniv = gen_sqshrnb_vec,
7079 .opt_opc = vec_list,
7080 .fno = gen_helper_sve2_sqshrnb_h,
7081 .vece = MO_16 },
7082 { .fniv = gen_sqshrnb_vec,
7083 .opt_opc = vec_list,
7084 .fno = gen_helper_sve2_sqshrnb_s,
7085 .vece = MO_32 },
7086 { .fniv = gen_sqshrnb_vec,
7087 .opt_opc = vec_list,
7088 .fno = gen_helper_sve2_sqshrnb_d,
7089 .vece = MO_64 },
7090 };
7091 return do_sve2_shr_narrow(s, a, ops);
7092}
7093
7094static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
7095 TCGv_vec n, int64_t shr)
7096{
7097 TCGv_vec t = tcg_temp_new_vec_matching(d);
7098 int halfbits = 4 << vece;
7099 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
7100 int64_t min = -max - 1;
7101
7102 tcg_gen_sari_vec(vece, n, n, shr);
7103 tcg_gen_dupi_vec(vece, t, min);
7104 tcg_gen_smax_vec(vece, n, n, t);
7105 tcg_gen_dupi_vec(vece, t, max);
7106 tcg_gen_smin_vec(vece, n, n, t);
7107 tcg_gen_shli_vec(vece, n, n, halfbits);
7108 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7109 tcg_gen_bitsel_vec(vece, d, t, d, n);
7110 tcg_temp_free_vec(t);
7111}
7112
7113static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
7114{
7115 static const TCGOpcode vec_list[] = {
7116 INDEX_op_shli_vec, INDEX_op_sari_vec,
7117 INDEX_op_smax_vec, INDEX_op_smin_vec, 0
7118 };
7119 static const GVecGen2i ops[3] = {
7120 { .fniv = gen_sqshrnt_vec,
7121 .opt_opc = vec_list,
7122 .load_dest = true,
7123 .fno = gen_helper_sve2_sqshrnt_h,
7124 .vece = MO_16 },
7125 { .fniv = gen_sqshrnt_vec,
7126 .opt_opc = vec_list,
7127 .load_dest = true,
7128 .fno = gen_helper_sve2_sqshrnt_s,
7129 .vece = MO_32 },
7130 { .fniv = gen_sqshrnt_vec,
7131 .opt_opc = vec_list,
7132 .load_dest = true,
7133 .fno = gen_helper_sve2_sqshrnt_d,
7134 .vece = MO_64 },
7135 };
7136 return do_sve2_shr_narrow(s, a, ops);
7137}
7138
7139static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
7140{
7141 static const GVecGen2i ops[3] = {
7142 { .fno = gen_helper_sve2_sqrshrnb_h },
7143 { .fno = gen_helper_sve2_sqrshrnb_s },
7144 { .fno = gen_helper_sve2_sqrshrnb_d },
7145 };
7146 return do_sve2_shr_narrow(s, a, ops);
7147}
7148
7149static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
7150{
7151 static const GVecGen2i ops[3] = {
7152 { .fno = gen_helper_sve2_sqrshrnt_h },
7153 { .fno = gen_helper_sve2_sqrshrnt_s },
7154 { .fno = gen_helper_sve2_sqrshrnt_d },
7155 };
7156 return do_sve2_shr_narrow(s, a, ops);
7157}
7158
c13418da
RH
7159static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
7160 TCGv_vec n, int64_t shr)
7161{
7162 TCGv_vec t = tcg_temp_new_vec_matching(d);
7163 int halfbits = 4 << vece;
7164
7165 tcg_gen_shri_vec(vece, n, n, shr);
7166 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7167 tcg_gen_umin_vec(vece, d, n, t);
7168 tcg_temp_free_vec(t);
7169}
7170
7171static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
7172{
7173 static const TCGOpcode vec_list[] = {
7174 INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7175 };
7176 static const GVecGen2i ops[3] = {
7177 { .fniv = gen_uqshrnb_vec,
7178 .opt_opc = vec_list,
7179 .fno = gen_helper_sve2_uqshrnb_h,
7180 .vece = MO_16 },
7181 { .fniv = gen_uqshrnb_vec,
7182 .opt_opc = vec_list,
7183 .fno = gen_helper_sve2_uqshrnb_s,
7184 .vece = MO_32 },
7185 { .fniv = gen_uqshrnb_vec,
7186 .opt_opc = vec_list,
7187 .fno = gen_helper_sve2_uqshrnb_d,
7188 .vece = MO_64 },
7189 };
7190 return do_sve2_shr_narrow(s, a, ops);
7191}
7192
7193static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
7194 TCGv_vec n, int64_t shr)
7195{
7196 TCGv_vec t = tcg_temp_new_vec_matching(d);
7197 int halfbits = 4 << vece;
7198
7199 tcg_gen_shri_vec(vece, n, n, shr);
7200 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7201 tcg_gen_umin_vec(vece, n, n, t);
7202 tcg_gen_shli_vec(vece, n, n, halfbits);
7203 tcg_gen_bitsel_vec(vece, d, t, d, n);
7204 tcg_temp_free_vec(t);
7205}
7206
7207static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
7208{
7209 static const TCGOpcode vec_list[] = {
7210 INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7211 };
7212 static const GVecGen2i ops[3] = {
7213 { .fniv = gen_uqshrnt_vec,
7214 .opt_opc = vec_list,
7215 .load_dest = true,
7216 .fno = gen_helper_sve2_uqshrnt_h,
7217 .vece = MO_16 },
7218 { .fniv = gen_uqshrnt_vec,
7219 .opt_opc = vec_list,
7220 .load_dest = true,
7221 .fno = gen_helper_sve2_uqshrnt_s,
7222 .vece = MO_32 },
7223 { .fniv = gen_uqshrnt_vec,
7224 .opt_opc = vec_list,
7225 .load_dest = true,
7226 .fno = gen_helper_sve2_uqshrnt_d,
7227 .vece = MO_64 },
7228 };
7229 return do_sve2_shr_narrow(s, a, ops);
7230}
7231
7232static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
7233{
7234 static const GVecGen2i ops[3] = {
7235 { .fno = gen_helper_sve2_uqrshrnb_h },
7236 { .fno = gen_helper_sve2_uqrshrnb_s },
7237 { .fno = gen_helper_sve2_uqrshrnb_d },
7238 };
7239 return do_sve2_shr_narrow(s, a, ops);
7240}
7241
7242static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
7243{
7244 static const GVecGen2i ops[3] = {
7245 { .fno = gen_helper_sve2_uqrshrnt_h },
7246 { .fno = gen_helper_sve2_uqrshrnt_s },
7247 { .fno = gen_helper_sve2_uqrshrnt_d },
7248 };
7249 return do_sve2_shr_narrow(s, a, ops);
5ff2838d 7250}
b87dbeeb
SL
7251
7252static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
7253 gen_helper_gvec_4_ptr *fn)
7254{
7255 if (!dc_isar_feature(aa64_sve2, s)) {
7256 return false;
7257 }
7258 return do_zpzz_fp(s, a, fn);
7259}
7260
7261#define DO_SVE2_ZPZZ_FP(NAME, name) \
7262static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
7263{ \
7264 static gen_helper_gvec_4_ptr * const fns[4] = { \
7265 NULL, gen_helper_sve2_##name##_zpzz_h, \
7266 gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
7267 }; \
7268 return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
7269}
7270
7271DO_SVE2_ZPZZ_FP(FADDP, faddp)
7272DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
7273DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
7274DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
7275DO_SVE2_ZPZZ_FP(FMINP, fminp)