/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "trace-tcg.h"
#include "translate-a64.h"

/*
 * Include the generated decoder.
 */

#include "decode-sve.inc.c"

/*
 * Implement all of the translator functions referenced by the decoder.
 */

/* Return the offset into CPUARMState of the predicate vector register Pn.
 * Note that for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64. */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->sve_len >> 3;
}

/* Round up the size of a register to a size allowed by
 * the tcg vector infrastructure. Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static int size_for_gvec(int size)
{
    if (size <= 8) {
        return 8;
    } else {
        return QEMU_ALIGN_UP(size, 16);
    }
}

static int pred_gvec_reg_size(DisasContext *s)
{
    return size_for_gvec(pred_full_reg_size(s));
}

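/* For example, with vector lengths of 128 to 2048 bits the predicate
 * registers span 2 to 32 bytes, so size_for_gvec maps 2 -> 8, 12 -> 16
 * and 32 -> 32.
 */
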
/* Invoke a vector expander on two Zregs. */
static bool do_vector2_z(DisasContext *s, GVecGen2Fn *gvec_fn,
                         int esz, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn), vsz, vsz);
    }
    return true;
}

39eea561
RH
94/* Invoke a vector expander on three Zregs. */
95static bool do_vector3_z(DisasContext *s, GVecGen3Fn *gvec_fn,
96 int esz, int rd, int rn, int rm)
38388f7e 97{
39eea561
RH
98 if (sve_access_check(s)) {
99 unsigned vsz = vec_full_reg_size(s);
100 gvec_fn(esz, vec_full_reg_offset(s, rd),
101 vec_full_reg_offset(s, rn),
102 vec_full_reg_offset(s, rm), vsz, vsz);
103 }
104 return true;
38388f7e
RH
105}
106
39eea561
RH
107/* Invoke a vector move on two Zregs. */
108static bool do_mov_z(DisasContext *s, int rd, int rn)
38388f7e 109{
39eea561 110 return do_vector2_z(s, tcg_gen_gvec_mov, 0, rd, rn);
38388f7e
RH
111}
112
/* Invoke a vector expander on two Pregs. */
static bool do_vector2_p(DisasContext *s, GVecGen2Fn *gvec_fn,
                         int esz, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        gvec_fn(esz, pred_full_reg_offset(s, rd),
                pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}

/* Invoke a vector expander on three Pregs. */
static bool do_vector3_p(DisasContext *s, GVecGen3Fn *gvec_fn,
                         int esz, int rd, int rn, int rm)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        gvec_fn(esz, pred_full_reg_offset(s, rd),
                pred_full_reg_offset(s, rn),
                pred_full_reg_offset(s, rm), psz, psz);
    }
    return true;
}

/* Invoke a vector operation on four Pregs. */
static bool do_vecop4_p(DisasContext *s, const GVecGen4 *gvec_op,
                        int rd, int rn, int rm, int rg)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_4(pred_full_reg_offset(s, rd),
                       pred_full_reg_offset(s, rn),
                       pred_full_reg_offset(s, rm),
                       pred_full_reg_offset(s, rg),
                       psz, psz, gvec_op);
    }
    return true;
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    return do_vector2_p(s, tcg_gen_gvec_mov, 0, rd, rn);
}

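/* Helpers that implement PredTest return the flags packed into an i32:
 * bit 31 holds N, bit 1 holds the inverse of Z (QEMU's cpu_ZF is zero
 * exactly when the Z flag is set), bit 0 holds C, and V is always 0.
 */
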
/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t;

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);
    t = tcg_const_i32(words);

    gen_helper_sve_predtest(t, dptr, gptr, t);
    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};

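/* One predicate bit governs each vector byte, so for esz == 0 (bytes)
 * every bit is active, for esz == 1 (halfwords) every second bit
 * (0x5555...), for esz == 2 every fourth bit (0x1111...), and for
 * esz == 3 every eighth bit (0x0101...).
 */
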
/*
 *** SVE Logical - Unpredicated Group
 */

static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
{
    return do_vector3_z(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm);
}

static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
{
    if (a->rn == a->rm) { /* MOV */
        return do_mov_z(s, a->rd, a->rn);
    } else {
        return do_vector3_z(s, tcg_gen_gvec_or, 0, a->rd, a->rn, a->rm);
    }
}

static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
{
    return do_vector3_z(s, tcg_gen_gvec_xor, 0, a->rd, a->rn, a->rm);
}

static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a, uint32_t insn)
{
    return do_vector3_z(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm);
}

/*
 *** SVE Integer Arithmetic - Binary Predicated Group
 */

static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           vsz, vsz, 0, fn);
    }
    return true;
}

#define DO_ZPZZ(NAME, name) \
static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a,       \
                                uint32_t insn)                          \
{                                                                       \
    static gen_helper_gvec_4 * const fns[4] = {                         \
        gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h, \
        gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d, \
    };                                                                  \
    return do_zpzz_ool(s, a, fns[a->esz]);                              \
}

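/* Thus DO_ZPZZ(ADD, add) defines trans_ADD_zpzz(), which dispatches on
 * a->esz to gen_helper_sve_add_zpzz_{b,h,s,d}.
 */
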
DO_ZPZZ(AND, and)
DO_ZPZZ(EOR, eor)
DO_ZPZZ(ORR, orr)
DO_ZPZZ(BIC, bic)

DO_ZPZZ(ADD, add)
DO_ZPZZ(SUB, sub)

DO_ZPZZ(SMAX, smax)
DO_ZPZZ(UMAX, umax)
DO_ZPZZ(SMIN, smin)
DO_ZPZZ(UMIN, umin)
DO_ZPZZ(SABD, sabd)
DO_ZPZZ(UABD, uabd)

DO_ZPZZ(MUL, mul)
DO_ZPZZ(SMULH, smulh)
DO_ZPZZ(UMULH, umulh)

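/* SDIV and UDIV exist only for 32-bit and 64-bit elements, so the
 * byte and halfword slots below are NULL and do_zpzz_ool rejects them.
 */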
static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a, uint32_t insn)
{
    static gen_helper_gvec_4 * const fns[4] = {
        NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
    };
    return do_zpzz_ool(s, a, fns[a->esz]);
}

static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a, uint32_t insn)
{
    static gen_helper_gvec_4 * const fns[4] = {
        NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
    };
    return do_zpzz_ool(s, a, fns[a->esz]);
}

#undef DO_ZPZZ

/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(desc);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
    return true;
}

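/* The reduction helpers take the vector and governing predicate by
 * pointer plus a simd_desc() descriptor encoding the vector length,
 * and return the scalar result in a 64-bit temp; write_fp_dreg then
 * stores it into the low 64 bits of the destination register, with
 * the remaining bits cleared per AArch64 scalar-write semantics.
 */
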
#define DO_VPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a, uint32_t insn) \
{                                                                         \
    static gen_helper_gvec_reduc * const fns[4] = {                       \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,             \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,             \
    };                                                                    \
    return do_vpz_ool(s, a, fns[a->esz]);                                 \
}

DO_VPZ(ORV, orv)
DO_VPZ(ANDV, andv)
DO_VPZ(EORV, eorv)

DO_VPZ(UADDV, uaddv)
DO_VPZ(SMAXV, smaxv)
DO_VPZ(UMAXV, umaxv)
DO_VPZ(SMINV, sminv)
DO_VPZ(UMINV, uminv)

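/* There is no 64-bit SADDV, so that slot is NULL below: sign- and
 * zero-extension coincide at 64 bits, and UADDV.D already produces
 * the same modulo-2^64 sum.
 */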
static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a, uint32_t insn)
{
    static gen_helper_gvec_reduc * const fns[4] = {
        gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
        gen_helper_sve_saddv_s, NULL
    };
    return do_vpz_ool(s, a, fns[a->esz]);
}

#undef DO_VPZ

/*
 *** SVE Predicate Logical Operations Group
 */

static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);

        tcg_temp_free_i64(pd);
        tcg_temp_free_i64(pn);
        tcg_temp_free_i64(pm);
        tcg_temp_free_i64(pg);
    } else {
        /* The operation and flags generation is large. The computation
         * of the flags depends on the original contents of the guarding
         * predicate. If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}

static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

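/* The special cases below rely on AND being idempotent: with rn == rm
 * the result reduces to pn & pg, and when pg equals either operand the
 * final AND with pg is redundant, so a three-operand AND suffices.
 */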
static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_and_pg_i64,
        .fniv = gen_and_pg_vec,
        .fno = gen_helper_sve_and_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else if (a->rn == a->rm) {
        if (a->pg == a->rn) {
            return do_mov_p(s, a->rd, a->rn);
        } else {
            return do_vector3_p(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->pg);
        }
    } else if (a->pg == a->rn || a->pg == a->rm) {
        return do_vector3_p(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_andc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_andc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_bic_pg_i64,
        .fniv = gen_bic_pg_vec,
        .fno = gen_helper_sve_bic_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else if (a->pg == a->rn) {
        return do_vector3_p(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_xor_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_xor_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_sel_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pn, pn, pg);
    tcg_gen_andc_i64(pm, pm, pg);
    tcg_gen_or_i64(pd, pn, pm);
}

static void gen_sel_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pn, pn, pg);
    tcg_gen_andc_vec(vece, pm, pm, pg);
    tcg_gen_or_vec(vece, pd, pn, pm);
}

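/* SEL computes pd = (pn & pg) | (pm & ~pg). It has no flag-setting
 * form: the encoding with the S bit set is unallocated, hence the
 * rejection below rather than a call to do_pppp_flags.
 */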
static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_sel_pg_i64,
        .fniv = gen_sel_pg_vec,
        .fno = gen_helper_sve_sel_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return false;
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_orr_pg_i64,
        .fniv = gen_orr_pg_vec,
        .fno = gen_helper_sve_orr_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else if (a->pg == a->rn && a->rn == a->rm) {
        return do_mov_p(s, a->rd, a->rn);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_orc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_orc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                            TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a, uint32_t insn)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (a->s) {
        return do_pppp_flags(s, a, &op);
    } else {
        return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg);
    }
}

/*
 *** SVE Predicate Misc Group
 */

static bool trans_PTEST(DisasContext *s, arg_PTEST *a, uint32_t insn)
{
    if (sve_access_check(s)) {
        int nofs = pred_full_reg_offset(s, a->rn);
        int gofs = pred_full_reg_offset(s, a->pg);
        int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);

        if (words == 1) {
            TCGv_i64 pn = tcg_temp_new_i64();
            TCGv_i64 pg = tcg_temp_new_i64();

            tcg_gen_ld_i64(pn, cpu_env, nofs);
            tcg_gen_ld_i64(pg, cpu_env, gofs);
            do_predtest1(pn, pg);

            tcg_temp_free_i64(pn);
            tcg_temp_free_i64(pg);
        } else {
            do_predtest(s, nofs, gofs, words);
        }
    }
    return true;
}

/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    switch (pattern) {
    case 0x0: /* POW2 */
        return pow2floor(elements);
    case 0x1: /* VL1 */
    case 0x2: /* VL2 */
    case 0x3: /* VL3 */
    case 0x4: /* VL4 */
    case 0x5: /* VL5 */
    case 0x6: /* VL6 */
    case 0x7: /* VL7 */
    case 0x8: /* VL8 */
        bound = pattern;
        break;
    case 0x9: /* VL16 */
    case 0xa: /* VL32 */
    case 0xb: /* VL64 */
    case 0xc: /* VL128 */
    case 0xd: /* VL256 */
        bound = 16 << (pattern - 9);
        break;
    case 0x1d: /* MUL4 */
        return elements - elements % 4;
    case 0x1e: /* MUL3 */
        return elements - elements % 3;
    case 0x1f: /* ALL */
        return elements;
    default:   /* #uimm5 */
        return 0;
    }
    return elements >= bound ? bound : 0;
}

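/* For example, a 256-bit vector (fullsz == 32 bytes) at esz == 2 has
 * 8 word elements: POW2 gives 8, VL5 gives 5, VL16 gives 0 (8 < 16),
 * MUL3 gives 6 and ALL gives 8.
 */
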
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0. For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many. */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            lastword &= ~(-1ull << (setsz % 64));
        }
    }

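    /* E.g. PTRUE with pattern VL5 at esz == 2 (on a 256-bit vector)
     * yields numelem == 5 and setsz == 20 bits: word is
     * 0x1111111111111111 and lastword is 0x11111, one predicate bit
     * per 4-byte element.
     */
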
    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup64i(ofs, oprsz, maxsz, word);
            goto done;
        }
        if (oprsz * 8 == setsz + 8) {
            tcg_gen_gvec_dup64i(ofs, oprsz, maxsz, word);
            tcg_gen_movi_i64(t, 0);
            tcg_gen_st_i64(t, cpu_env, ofs + oprsz - 8);
            goto done;
        }
    }

    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < setsz; i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    tcg_temp_free_i64(t);

    /* PTRUES */
    if (setflag) {
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}

static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a, uint32_t insn)
{
    return do_predset(s, a->esz, a->rd, a->pat, a->s);
}

static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a, uint32_t insn)
{
    /* Note pat == 31 is #all, to set all elements. */
    return do_predset(s, 0, FFR_PRED_NUM, 31, false);
}

static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a, uint32_t insn)
{
    /* Note pat == 32 is #unimp, to set no elements. */
    return do_predset(s, 0, a->rd, 32, false);
}

static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a, uint32_t insn)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication. Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };
    return trans_AND_pppp(s, &alt_a, insn);
}

static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a, uint32_t insn)
{
    return do_mov_p(s, a->rd, FFR_PRED_NUM);
}

static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a, uint32_t insn)
{
    return do_mov_p(s, FFR_PRED_NUM, a->rn);
}

static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc;

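    /* Note this is not a standard simd_desc(): the low bits carry the
     * predicate word count and the data field carries the element size.
     */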
    desc = DIV_ROUND_UP(pred_full_reg_size(s), 8);
    desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_const_i32(desc);

    gen_fn(t, t_pd, t_pg, t);
    tcg_temp_free_ptr(t_pd);
    tcg_temp_free_ptr(t_pg);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
    return true;
}

static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a, uint32_t insn)
{
    return do_pfirst_pnext(s, a, gen_helper_sve_pfirst);
}

static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a, uint32_t insn)
{
    return do_pfirst_pnext(s, a, gen_helper_sve_pnext);
}

/*
 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
 */

/* Subroutine loading a vector register at VOFS of LEN bytes.
 * The load should begin at the address Rn + IMM.
 */

static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
                   int rn, int imm)
{
    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
    uint32_t len_remain = len % 8;
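    /* One memory operation per aligned 8-byte unit, plus one per set
     * bit of the remainder; e.g. a 6-byte tail needs two more ops.
     */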
    uint32_t nparts = len / 8 + ctpop8(len_remain);
    int midx = get_mem_index(s);
    TCGv_i64 addr, t0, t1;

    addr = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();

    /* Note that unpredicated load/store of vector/predicate registers
     * are defined as a stream of bytes, which equates to little-endian
     * operations on larger quantities. There is no nice way to force
     * a little-endian load for aarch64_be-linux-user out of line.
     *
     * Attempt to keep code expansion to a minimum by limiting the
     * amount of unrolling done.
     */
    if (nparts <= 4) {
        int i;

        for (i = 0; i < len_align; i += 8) {
            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
            tcg_gen_st_i64(t0, cpu_env, vofs + i);
        }
    } else {
        TCGLabel *loop = gen_new_label();
        TCGv_ptr tp, i = tcg_const_local_ptr(0);

        gen_set_label(loop);

        /* Minimize the number of local temps that must be re-read from
         * the stack each iteration. Instead, re-compute values other
         * than the loop counter.
         */
        tp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tp, i, imm);
        tcg_gen_extu_ptr_i64(addr, tp);
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));

        tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);

        tcg_gen_add_ptr(tp, cpu_env, i);
        tcg_gen_addi_ptr(i, i, 8);
        tcg_gen_st_i64(t0, tp, vofs);
        tcg_temp_free_ptr(tp);

        tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
        tcg_temp_free_ptr(i);
    }

    /* Predicate register loads can be any multiple of 2.
     * Note that we still store the entire 64-bit unit into cpu_env.
     */
    if (len_remain) {
        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);

        switch (len_remain) {
        case 2:
        case 4:
        case 8:
            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
            break;

        case 6:
            t1 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEUL);
            tcg_gen_addi_i64(addr, addr, 4);
            tcg_gen_qemu_ld_i64(t1, addr, midx, MO_LEUW);
            tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
            tcg_temp_free_i64(t1);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i64(t0);
}

static bool trans_LDR_zri(DisasContext *s, arg_rri *a, uint32_t insn)
{
    if (sve_access_check(s)) {
        int size = vec_full_reg_size(s);
        int off = vec_full_reg_offset(s, a->rd);
        do_ldr(s, off, size, a->rn, a->imm * size);
    }
    return true;
}

static bool trans_LDR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
{
    if (sve_access_check(s)) {
        int size = pred_full_reg_size(s);
        int off = pred_full_reg_offset(s, a->rd);
        do_ldr(s, off, size, a->rn, a->imm * size);
    }
    return true;
}