1 /*
2 * RISC-V translation routines for the RVV Standard Extension.
3 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2 or later, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17 #include "tcg/tcg-op-gvec.h"
18 #include "tcg/tcg-gvec-desc.h"
19 #include "internals.h"
20
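/*
 * Return true if register groups [astart, astart + asize) and
 * [bstart, bstart + bsize) overlap.  Worked example (illustrative values
 * only): astart = 8, asize = 4, bstart = 10, bsize = 2 gives
 * MAX(12, 12) - MIN(8, 10) = 4 < 4 + 2 = 6, so the groups overlap.
 */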
21 static inline bool is_overlapped(const int8_t astart, int8_t asize,
22 const int8_t bstart, int8_t bsize)
23 {
24 const int8_t aend = astart + asize;
25 const int8_t bend = bstart + bsize;
26
27 return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
28 }
29
30 static bool require_rvv(DisasContext *s)
31 {
32 return s->mstatus_vs != EXT_STATUS_DISABLED;
33 }
34
35 static bool require_rvf(DisasContext *s)
36 {
37 if (s->mstatus_fs == EXT_STATUS_DISABLED) {
38 return false;
39 }
40
41 switch (s->sew) {
42 case MO_16:
43 return s->cfg_ptr->ext_zvfh;
44 case MO_32:
45 return s->cfg_ptr->ext_zve32f;
46 case MO_64:
47 return s->cfg_ptr->ext_zve64d;
48 default:
49 return false;
50 }
51 }
52
53 static bool require_scale_rvf(DisasContext *s)
54 {
55 if (s->mstatus_fs == EXT_STATUS_DISABLED) {
56 return false;
57 }
58
59 switch (s->sew) {
60 case MO_8:
61 return s->cfg_ptr->ext_zvfh;
62 case MO_16:
63 return s->cfg_ptr->ext_zve32f;
64 case MO_32:
65 return s->cfg_ptr->ext_zve64d;
66 default:
67 return false;
68 }
69 }
70
71 static bool require_scale_rvfmin(DisasContext *s)
72 {
73 if (s->mstatus_fs == EXT_STATUS_DISABLED) {
74 return false;
75 }
76
77 switch (s->sew) {
78 case MO_8:
79 return s->cfg_ptr->ext_zvfhmin;
80 case MO_16:
81 return s->cfg_ptr->ext_zve32f;
82 case MO_32:
83 return s->cfg_ptr->ext_zve64d;
84 default:
85 return false;
86 }
87 }
88
89 /* Destination vector register group cannot overlap source mask register. */
90 static bool require_vm(int vm, int vd)
91 {
92 return (vm != 0 || vd != 0);
93 }
94
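/*
 * A segment access with nf fields must stay within 8 vector registers and
 * must not extend past v31.  Worked example (illustrative values only):
 * nf = 4 with LMUL = 2 (lmul = 1) uses 4 << 1 = 8 registers, so vd may be
 * at most v24.
 */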
95 static bool require_nf(int vd, int nf, int lmul)
96 {
97 int size = nf << MAX(lmul, 0);
98 return size <= 8 && vd + size <= 32;
99 }
100
101 /*
102 * The vector register should be aligned with the passed-in LMUL (EMUL).
103 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
104 */
105 static bool require_align(const int8_t val, const int8_t lmul)
106 {
107 return lmul <= 0 || extract32(val, 0, lmul) == 0;
108 }
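/*
 * For example (illustrative values only): with LMUL = 2 (lmul = 1), v2 and
 * v4 are legal register groups while v3 is not, since extract32(3, 0, 1) != 0.
 */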
109
110 /*
111 * A destination vector register group can overlap a source vector
112 * register group only if one of the following holds:
113 * 1. The destination EEW equals the source EEW.
114 * 2. The destination EEW is smaller than the source EEW and the overlap
115 * is in the lowest-numbered part of the source register group.
116 * 3. The destination EEW is greater than the source EEW, the source EMUL
117 * is at least 1, and the overlap is in the highest-numbered part of
118 * the destination register group.
119 * (Section 5.2)
120 *
121 * This function returns true if one of the following holds:
122 * * Destination vector register group does not overlap a source vector
123 * register group.
124 * Rule 3 is met.
125 * For rule 1, overlap is allowed so this function doesn't need to be called.
126 * For rule 2, the only allowed overlap is (vd == vs), so the caller has to
127 * check that (vd != vs) before calling this function.
128 */
129 static bool require_noover(const int8_t dst, const int8_t dst_lmul,
130 const int8_t src, const int8_t src_lmul)
131 {
132 int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
133 int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;
134
135 /* Destination EEW is greater than the source EEW, check rule 3. */
136 if (dst_size > src_size) {
137 if (dst < src &&
138 src_lmul >= 0 &&
139 is_overlapped(dst, dst_size, src, src_size) &&
140 !is_overlapped(dst, dst_size, src + src_size, src_size)) {
141 return true;
142 }
143 }
144
145 return !is_overlapped(dst, dst_size, src, src_size);
146 }
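/*
 * Worked example for rule 3 (illustrative values only): a widening op with
 * dst_lmul = 1 (vd = v2, group v2..v3) and src_lmul = 0 (vs = v3) overlaps
 * only in the highest-numbered register of the destination group, so
 * require_noover(2, 1, 3, 0) returns true and the overlap is permitted.
 */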
147
148 static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
149 {
150 TCGv s1, dst;
151
152 if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
153 return false;
154 }
155
156 dst = dest_gpr(s, rd);
157
158 if (rd == 0 && rs1 == 0) {
159 s1 = tcg_temp_new();
160 tcg_gen_mov_tl(s1, cpu_vl);
161 } else if (rs1 == 0) {
162 /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
163 s1 = tcg_constant_tl(RV_VLEN_MAX);
164 } else {
165 s1 = get_gpr(s, rs1, EXT_ZERO);
166 }
167
168 gen_helper_vsetvl(dst, tcg_env, s1, s2);
169 gen_set_gpr(s, rd, dst);
170 mark_vs_dirty(s);
171
172 gen_update_pc(s, s->cur_insn_len);
173 lookup_and_goto_ptr(s);
174 s->base.is_jmp = DISAS_NORETURN;
175 return true;
176 }
177
178 static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
179 {
180 TCGv dst;
181
182 if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
183 return false;
184 }
185
186 dst = dest_gpr(s, rd);
187
188 gen_helper_vsetvl(dst, tcg_env, s1, s2);
189 gen_set_gpr(s, rd, dst);
190 mark_vs_dirty(s);
191 gen_update_pc(s, s->cur_insn_len);
192 lookup_and_goto_ptr(s);
193 s->base.is_jmp = DISAS_NORETURN;
194
195 return true;
196 }
197
198 static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
199 {
200 TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
201 return do_vsetvl(s, a->rd, a->rs1, s2);
202 }
203
204 static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
205 {
206 TCGv s2 = tcg_constant_tl(a->zimm);
207 return do_vsetvl(s, a->rd, a->rs1, s2);
208 }
209
210 static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
211 {
212 TCGv s1 = tcg_constant_tl(a->rs1);
213 TCGv s2 = tcg_constant_tl(a->zimm);
214 return do_vsetivli(s, a->rd, s1, s2);
215 }
216
217 /* vector register offset from env */
218 static uint32_t vreg_ofs(DisasContext *s, int reg)
219 {
220 return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8;
221 }
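/*
 * Each architectural vector register occupies vlen / 8 bytes of the vreg[]
 * array, so e.g. with VLEN = 128 bits, v2 starts 32 bytes into vreg[].
 */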
222
223 /* check functions */
224
225 /*
226 * Vector unit-stride, strided, unit-stride segment, strided segment
227 * store check function.
228 *
229 * Rules to be checked here:
230 * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
231 * 2. The destination vector register number must be a multiple of EMUL.
232 * (Section 3.4.2, 7.3)
233 * 3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
234 * 4. Vector register numbers accessed by the segment load or store
235 * cannot increment past 31. (Section 7.8)
236 */
237 static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
238 {
239 int8_t emul = eew - s->sew + s->lmul;
240 return (emul >= -3 && emul <= 3) &&
241 require_align(vd, emul) &&
242 require_nf(vd, nf, emul);
243 }
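/*
 * Worked example (illustrative values only): vse8.v (EEW = 8) with SEW = 32
 * and LMUL = 1 gives emul = 0 - 2 + 0 = -2, i.e. EMUL = 1/4, which is within
 * the legal range [1/8, 8].
 */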
244
245 /*
246 * Vector unit-stride, strided, unit-stride segment, strided segment
247 * load check function.
248 *
249 * Rules to be checked here:
250 * 1. All rules that apply to store instructions also apply
251 * to load instructions.
252 * 2. Destination vector register group for a masked vector
253 * instruction cannot overlap the source mask register (v0).
254 * (Section 5.3)
255 */
256 static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
257 uint8_t eew)
258 {
259 return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
260 }
261
262 /*
263 * Vector indexed, indexed segment store check function.
264 *
265 * Rules to be checked here:
266 * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
267 * 2. The index vector register number must be a multiple of EMUL.
268 * (Section 3.4.2, 7.3)
269 * 3. The destination vector register number must be a multiple of LMUL.
270 * (Section 3.4.2, 7.3)
271 * 4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
272 * 5. Vector register numbers accessed by the segment load or store
273 * cannot increment past 31. (Section 7.8)
274 */
275 static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
276 uint8_t eew)
277 {
278 int8_t emul = eew - s->sew + s->lmul;
279 bool ret = (emul >= -3 && emul <= 3) &&
280 require_align(vs2, emul) &&
281 require_align(vd, s->lmul) &&
282 require_nf(vd, nf, s->lmul);
283
284 /*
285 * The V extension supports all vector load and store instructions,
286 * except that it does not support EEW=64 for index values
287 * when XLEN=32. (Section 18.3)
288 */
289 if (get_xl(s) == MXL_RV32) {
290 ret &= (eew != MO_64);
291 }
292
293 return ret;
294 }
295
296 /*
297 * Vector indexed, indexed segment load check function.
298 *
299 * Rules to be checked here:
300 * 1. All rules that apply to store instructions also apply
301 * to load instructions.
302 * 2. Destination vector register group for a masked vector
303 * instruction cannot overlap the source mask register (v0).
304 * (Section 5.3)
305 * 3. Destination vector register cannot overlap a source vector
306 * register (vs2) group.
307 * (Section 5.2)
308 * 4. Destination vector register groups cannot overlap
309 * the source vector register (vs2) group for
310 * indexed segment load instructions. (Section 7.8.3)
311 */
312 static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
313 int nf, int vm, uint8_t eew)
314 {
315 int8_t seg_vd;
316 int8_t emul = eew - s->sew + s->lmul;
317 bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
318 require_vm(vm, vd);
319
320 /* Each segment register group has to follow overlap rules. */
321 for (int i = 0; i < nf; ++i) {
322 seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;
323
324 if (eew > s->sew) {
325 if (seg_vd != vs2) {
326 ret &= require_noover(seg_vd, s->lmul, vs2, emul);
327 }
328 } else if (eew < s->sew) {
329 ret &= require_noover(seg_vd, s->lmul, vs2, emul);
330 }
331
332 /*
333 * Destination vector register groups cannot overlap
334 * the source vector register (vs2) group for
335 * indexed segment load instructions.
336 */
337 if (nf > 1) {
338 ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
339 vs2, 1 << MAX(emul, 0));
340 }
341 }
342 return ret;
343 }
344
345 static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
346 {
347 return require_vm(vm, vd) &&
348 require_align(vd, s->lmul) &&
349 require_align(vs, s->lmul);
350 }
351
352 /*
353 * Check function for vector instruction with format:
354 * single-width result and single-width sources (SEW = SEW op SEW)
355 *
356 * Rules to be checked here:
357 * 1. Destination vector register group for a masked vector
358 * instruction cannot overlap the source mask register (v0).
359 * (Section 5.3)
360 * 2. The destination vector register number must be a multiple of LMUL.
361 * (Section 3.4.2)
362 * 3. The source (vs2, vs1) vector register numbers must be multiples of LMUL.
363 * (Section 3.4.2)
364 */
365 static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
366 {
367 return vext_check_ss(s, vd, vs2, vm) &&
368 require_align(vs1, s->lmul);
369 }
370
371 static bool vext_check_ms(DisasContext *s, int vd, int vs)
372 {
373 bool ret = require_align(vs, s->lmul);
374 if (vd != vs) {
375 ret &= require_noover(vd, 0, vs, s->lmul);
376 }
377 return ret;
378 }
379
380 /*
381 * Check function for maskable vector instruction with format:
382 * single-width result and single-width sources (SEW = SEW op SEW)
383 *
384 * Rules to be checked here:
385 * 1. The source (vs2, vs1) vector register numbers must be multiples of LMUL.
386 * (Section 3.4.2)
387 * 2. Destination vector register cannot overlap a source vector
388 * register (vs2, vs1) group.
389 * (Section 5.2)
390 * 3. The destination vector register group for a masked vector
391 * instruction cannot overlap the source mask register (v0),
392 * unless the destination vector register is being written
393 * with a mask value (e.g., comparisons) or the scalar result
394 * of a reduction. (Section 5.3)
395 */
396 static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
397 {
398 bool ret = vext_check_ms(s, vd, vs2) &&
399 require_align(vs1, s->lmul);
400 if (vd != vs1) {
401 ret &= require_noover(vd, 0, vs1, s->lmul);
402 }
403 return ret;
404 }
405
406 /*
407 * Common check function for vector widening instructions
408 * of double-width result (2*SEW).
409 *
410 * Rules to be checked here:
411 * 1. The largest vector register group used by an instruction
412 * cannot be greater than 8 vector registers (Section 5.2):
413 * => LMUL < 8.
414 * => SEW < 64.
415 * 2. Double-width SEW cannot be greater than ELEN.
416 * 3. The destination vector register number must be a multiple of 2 * LMUL.
417 * (Section 3.4.2)
418 * 4. Destination vector register group for a masked vector
419 * instruction cannot overlap the source mask register (v0).
420 * (Section 5.3)
421 */
422 static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
423 {
424 return (s->lmul <= 2) &&
425 (s->sew < MO_64) &&
426 ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
427 require_align(vd, s->lmul + 1) &&
428 require_vm(vm, vd);
429 }
430
431 /*
432 * Common check function for vector narrowing instructions
433 * of single-width result (SEW) and double-width source (2*SEW).
434 *
435 * Rules to be checked here:
436 * 1. The largest vector register group used by an instruction
437 * cannot be greater than 8 vector registers (Section 5.2):
438 * => LMUL < 8.
439 * => SEW < 64.
440 * 2. Double-width SEW cannot be greater than ELEN.
441 * 3. The source vector register number must be a multiple of 2 * LMUL.
442 * (Section 3.4.2)
443 * 4. The destination vector register number must be a multiple of LMUL.
444 * (Section 3.4.2)
445 * 5. Destination vector register group for a masked vector
446 * instruction cannot overlap the source mask register (v0).
447 * (Section 5.3)
448 */
449 static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
450 int vm)
451 {
452 return (s->lmul <= 2) &&
453 (s->sew < MO_64) &&
454 ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
455 require_align(vs2, s->lmul + 1) &&
456 require_align(vd, s->lmul) &&
457 require_vm(vm, vd);
458 }
459
460 static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
461 {
462 return vext_wide_check_common(s, vd, vm) &&
463 require_align(vs, s->lmul) &&
464 require_noover(vd, s->lmul + 1, vs, s->lmul);
465 }
466
467 static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
468 {
469 return vext_wide_check_common(s, vd, vm) &&
470 require_align(vs, s->lmul + 1);
471 }
472
473 /*
474 * Check function for vector instruction with format:
475 * double-width result and single-width sources (2*SEW = SEW op SEW)
476 *
477 * Rules to be checked here:
478 * 1. All rules defined in the widening common rules apply.
479 * 2. The source (vs2, vs1) vector register numbers must be multiples of LMUL.
480 * (Section 3.4.2)
481 * 3. Destination vector register cannot overlap a source vector
482 * register (vs2, vs1) group.
483 * (Section 5.2)
484 */
485 static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
486 {
487 return vext_check_ds(s, vd, vs2, vm) &&
488 require_align(vs1, s->lmul) &&
489 require_noover(vd, s->lmul + 1, vs1, s->lmul);
490 }
491
492 /*
493 * Check function for vector instruction with format:
494 * double-width result and double-width source1 and single-width
495 * source2 (2*SEW = 2*SEW op SEW)
496 *
497 * Rules to be checked here:
498 * 1. All rules defined in the widening common rules apply.
499 * 2. The source 1 (vs2) vector register number must be a multiple of 2 * LMUL.
500 * (Section 3.4.2)
501 * 3. The source 2 (vs1) vector register number must be a multiple of LMUL.
502 * (Section 3.4.2)
503 * 4. Destination vector register cannot overlap a source vector
504 * register (vs1) group.
505 * (Section 5.2)
506 */
507 static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
508 {
509 return vext_check_ds(s, vd, vs1, vm) &&
510 require_align(vs2, s->lmul + 1);
511 }
512
513 static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
514 {
515 bool ret = vext_narrow_check_common(s, vd, vs, vm);
516 if (vd != vs) {
517 ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
518 }
519 return ret;
520 }
521
522 /*
523 * Check function for vector instruction with format:
524 * single-width result and double-width source 1 and single-width
525 * source 2 (SEW = 2*SEW op SEW)
526 *
527 * Rules to be checked here:
528 * 1. All rules defined in the narrowing common rules apply.
529 * 2. Destination vector register cannot overlap a source vector
530 * register (vs2) group.
531 * (Section 5.2)
532 * 3. The source 2 (vs1) vector register number must be a multiple of LMUL.
533 * (Section 3.4.2)
534 */
535 static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
536 {
537 return vext_check_sd(s, vd, vs2, vm) &&
538 require_align(vs1, s->lmul);
539 }
540
541 /*
542 * Check function for vector reduction instructions.
543 *
544 * Rules to be checked here:
545 * 1. The source 1 (vs2) vector register number must be a multiple of LMUL.
546 * (Section 3.4.2)
547 */
548 static bool vext_check_reduction(DisasContext *s, int vs2)
549 {
550 return require_align(vs2, s->lmul) && s->vstart_eq_zero;
551 }
552
553 /*
554 * Check function for vector slide instructions.
555 *
556 * Rules to be checked here:
557 * 1. The source 1 (vs2) vector register number must be a multiple of LMUL.
558 * (Section 3.4.2)
559 * 2. The destination vector register number must be a multiple of LMUL.
560 * (Section 3.4.2)
561 * 3. Destination vector register group for a masked vector
562 * instruction cannot overlap the source mask register (v0).
563 * (Section 5.3)
564 * 4. The destination vector register group for vslideup, vslide1up,
565 * vfslide1up, cannot overlap the source vector register (vs2) group.
566 * (Section 5.2, 16.3.1, 16.3.3)
567 */
568 static bool vext_check_slide(DisasContext *s, int vd, int vs2,
569 int vm, bool is_over)
570 {
571 bool ret = require_align(vs2, s->lmul) &&
572 require_align(vd, s->lmul) &&
573 require_vm(vm, vd);
574 if (is_over) {
575 ret &= (vd != vs2);
576 }
577 return ret;
578 }
579
580 /*
581 * cpu_get_tb_cpu_state() sets VILL if RVV is not present,
582 * so checking the vill bit here also covers the RVV check.
583 */
584 static bool vext_check_isa_ill(DisasContext *s)
585 {
586 return !s->vill;
587 }
588
589 /* common translation macro */
590 #define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK) \
591 static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
592 { \
593 if (CHECK(s, a, EEW)) { \
594 return OP(s, a, EEW); \
595 } \
596 return false; \
597 }
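/*
 * For example, GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
 * below expands to trans_vle8_v(), which runs ld_us_check() and, if it
 * passes, ld_us_op() with EEW = MO_8.
 */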
598
599 static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
600 {
601 int8_t emul = eew - s->sew + s->lmul;
602 return emul < 0 ? 0 : emul;
603 }
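/*
 * e.g. EEW = 16, SEW = 32, LMUL = 2 gives emul = 1 - 2 + 1 = 0, i.e. EMUL = 1
 * (illustrative values only); a fractional EMUL (emul < 0) is encoded as 0 in
 * the descriptor.
 */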
604
605 /*
606 *** unit stride load and store
607 */
608 typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
609 TCGv_env, TCGv_i32);
610
611 static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
612 gen_helper_ldst_us *fn, DisasContext *s,
613 bool is_store)
614 {
615 TCGv_ptr dest, mask;
616 TCGv base;
617 TCGv_i32 desc;
618
619 TCGLabel *over = gen_new_label();
620 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
621
622 dest = tcg_temp_new_ptr();
623 mask = tcg_temp_new_ptr();
624 base = get_gpr(s, rs1, EXT_NONE);
625
626 /*
627 * simd_desc supports at most 2048 bytes, but in this implementation the
628 * max vector group length is 4096 bytes, so split it into two parts.
629 *
630 * The first part is vlen in bytes, encoded in maxsz of simd_desc.
631 * The second part is lmul, encoded in data of simd_desc.
632 */
633 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
634 s->cfg_ptr->vlen / 8, data));
635
636 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
637 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
638
639 fn(dest, mask, base, tcg_env, desc);
640
641 if (!is_store) {
642 mark_vs_dirty(s);
643 }
644
645 gen_set_label(over);
646 return true;
647 }
648
649 static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
650 {
651 uint32_t data = 0;
652 gen_helper_ldst_us *fn;
653 static gen_helper_ldst_us * const fns[2][4] = {
654 /* masked unit stride load */
655 { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
656 gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
657 /* unmasked unit stride load */
658 { gen_helper_vle8_v, gen_helper_vle16_v,
659 gen_helper_vle32_v, gen_helper_vle64_v }
660 };
661
662 fn = fns[a->vm][eew];
663 if (fn == NULL) {
664 return false;
665 }
666
667 /*
668 * Vector load/store instructions have the EEW encoded
669 * directly in the instructions. The maximum vector size is
670 * calculated with EMUL rather than LMUL.
671 */
672 uint8_t emul = vext_get_emul(s, eew);
673 data = FIELD_DP32(data, VDATA, VM, a->vm);
674 data = FIELD_DP32(data, VDATA, LMUL, emul);
675 data = FIELD_DP32(data, VDATA, NF, a->nf);
676 data = FIELD_DP32(data, VDATA, VTA, s->vta);
677 data = FIELD_DP32(data, VDATA, VMA, s->vma);
678 return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
679 }
680
681 static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
682 {
683 return require_rvv(s) &&
684 vext_check_isa_ill(s) &&
685 vext_check_load(s, a->rd, a->nf, a->vm, eew);
686 }
687
688 GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
689 GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
690 GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
691 GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
692
693 static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
694 {
695 uint32_t data = 0;
696 gen_helper_ldst_us *fn;
697 static gen_helper_ldst_us * const fns[2][4] = {
698 /* masked unit stride store */
699 { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
700 gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
701 /* unmasked unit stride store */
702 { gen_helper_vse8_v, gen_helper_vse16_v,
703 gen_helper_vse32_v, gen_helper_vse64_v }
704 };
705
706 fn = fns[a->vm][eew];
707 if (fn == NULL) {
708 return false;
709 }
710
711 uint8_t emul = vext_get_emul(s, eew);
712 data = FIELD_DP32(data, VDATA, VM, a->vm);
713 data = FIELD_DP32(data, VDATA, LMUL, emul);
714 data = FIELD_DP32(data, VDATA, NF, a->nf);
715 return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
716 }
717
718 static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
719 {
720 return require_rvv(s) &&
721 vext_check_isa_ill(s) &&
722 vext_check_store(s, a->rd, a->nf, eew);
723 }
724
725 GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check)
726 GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
727 GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
728 GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
729
730 /*
731 *** unit stride mask load and store
732 */
733 static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
734 {
735 uint32_t data = 0;
736 gen_helper_ldst_us *fn = gen_helper_vlm_v;
737
738 /* EMUL = 1, NFIELDS = 1 */
739 data = FIELD_DP32(data, VDATA, LMUL, 0);
740 data = FIELD_DP32(data, VDATA, NF, 1);
741 /* Mask destination registers are always tail-agnostic */
742 data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
743 data = FIELD_DP32(data, VDATA, VMA, s->vma);
744 return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
745 }
746
747 static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
748 {
749 /* EMUL = 1, NFIELDS = 1 */
750 return require_rvv(s) && vext_check_isa_ill(s);
751 }
752
753 static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
754 {
755 uint32_t data = 0;
756 gen_helper_ldst_us *fn = gen_helper_vsm_v;
757
758 /* EMUL = 1, NFIELDS = 1 */
759 data = FIELD_DP32(data, VDATA, LMUL, 0);
760 data = FIELD_DP32(data, VDATA, NF, 1);
761 return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
762 }
763
764 static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
765 {
766 /* EMUL = 1, NFIELDS = 1 */
767 return require_rvv(s) && vext_check_isa_ill(s);
768 }
769
770 GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
771 GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
772
773 /*
774 *** stride load and store
775 */
776 typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
777 TCGv, TCGv_env, TCGv_i32);
778
779 static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
780 uint32_t data, gen_helper_ldst_stride *fn,
781 DisasContext *s, bool is_store)
782 {
783 TCGv_ptr dest, mask;
784 TCGv base, stride;
785 TCGv_i32 desc;
786
787 TCGLabel *over = gen_new_label();
788 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
789
790 dest = tcg_temp_new_ptr();
791 mask = tcg_temp_new_ptr();
792 base = get_gpr(s, rs1, EXT_NONE);
793 stride = get_gpr(s, rs2, EXT_NONE);
794 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
795 s->cfg_ptr->vlen / 8, data));
796
797 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
798 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
799
800 fn(dest, mask, base, stride, tcg_env, desc);
801
802 if (!is_store) {
803 mark_vs_dirty(s);
804 }
805
806 gen_set_label(over);
807 return true;
808 }
809
810 static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
811 {
812 uint32_t data = 0;
813 gen_helper_ldst_stride *fn;
814 static gen_helper_ldst_stride * const fns[4] = {
815 gen_helper_vlse8_v, gen_helper_vlse16_v,
816 gen_helper_vlse32_v, gen_helper_vlse64_v
817 };
818
819 fn = fns[eew];
820 if (fn == NULL) {
821 return false;
822 }
823
824 uint8_t emul = vext_get_emul(s, eew);
825 data = FIELD_DP32(data, VDATA, VM, a->vm);
826 data = FIELD_DP32(data, VDATA, LMUL, emul);
827 data = FIELD_DP32(data, VDATA, NF, a->nf);
828 data = FIELD_DP32(data, VDATA, VTA, s->vta);
829 data = FIELD_DP32(data, VDATA, VMA, s->vma);
830 return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
831 }
832
833 static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
834 {
835 return require_rvv(s) &&
836 vext_check_isa_ill(s) &&
837 vext_check_load(s, a->rd, a->nf, a->vm, eew);
838 }
839
840 GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check)
841 GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
842 GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
843 GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
844
845 static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
846 {
847 uint32_t data = 0;
848 gen_helper_ldst_stride *fn;
849 static gen_helper_ldst_stride * const fns[4] = {
850 /* masked stride store */
851 gen_helper_vsse8_v, gen_helper_vsse16_v,
852 gen_helper_vsse32_v, gen_helper_vsse64_v
853 };
854
855 uint8_t emul = vext_get_emul(s, eew);
856 data = FIELD_DP32(data, VDATA, VM, a->vm);
857 data = FIELD_DP32(data, VDATA, LMUL, emul);
858 data = FIELD_DP32(data, VDATA, NF, a->nf);
859 fn = fns[eew];
860 if (fn == NULL) {
861 return false;
862 }
863
864 return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
865 }
866
867 static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
868 {
869 return require_rvv(s) &&
870 vext_check_isa_ill(s) &&
871 vext_check_store(s, a->rd, a->nf, eew);
872 }
873
874 GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check)
875 GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
876 GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
877 GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
878
879 /*
880 *** index load and store
881 */
882 typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
883 TCGv_ptr, TCGv_env, TCGv_i32);
884
885 static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
886 uint32_t data, gen_helper_ldst_index *fn,
887 DisasContext *s, bool is_store)
888 {
889 TCGv_ptr dest, mask, index;
890 TCGv base;
891 TCGv_i32 desc;
892
893 TCGLabel *over = gen_new_label();
894 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
895
896 dest = tcg_temp_new_ptr();
897 mask = tcg_temp_new_ptr();
898 index = tcg_temp_new_ptr();
899 base = get_gpr(s, rs1, EXT_NONE);
900 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
901 s->cfg_ptr->vlen / 8, data));
902
903 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
904 tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
905 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
906
907 fn(dest, mask, base, index, tcg_env, desc);
908
909 if (!is_store) {
910 mark_vs_dirty(s);
911 }
912
913 gen_set_label(over);
914 return true;
915 }
916
917 static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
918 {
919 uint32_t data = 0;
920 gen_helper_ldst_index *fn;
921 static gen_helper_ldst_index * const fns[4][4] = {
922 /*
923 * offset vector register group EEW = 8,
924 * data vector register group EEW = SEW
925 */
926 { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
927 gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
928 /*
929 * offset vector register group EEW = 16,
930 * data vector register group EEW = SEW
931 */
932 { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
933 gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
934 /*
935 * offset vector register group EEW = 32,
936 * data vector register group EEW = SEW
937 */
938 { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
939 gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
940 /*
941 * offset vector register group EEW = 64,
942 * data vector register group EEW = SEW
943 */
944 { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
945 gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
946 };
947
948 fn = fns[eew][s->sew];
949
950 uint8_t emul = vext_get_emul(s, s->sew);
951 data = FIELD_DP32(data, VDATA, VM, a->vm);
952 data = FIELD_DP32(data, VDATA, LMUL, emul);
953 data = FIELD_DP32(data, VDATA, NF, a->nf);
954 data = FIELD_DP32(data, VDATA, VTA, s->vta);
955 data = FIELD_DP32(data, VDATA, VMA, s->vma);
956 return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
957 }
958
959 static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
960 {
961 return require_rvv(s) &&
962 vext_check_isa_ill(s) &&
963 vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
964 }
965
966 GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
967 GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
968 GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
969 GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
970
971 static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
972 {
973 uint32_t data = 0;
974 gen_helper_ldst_index *fn;
975 static gen_helper_ldst_index * const fns[4][4] = {
976 /*
977 * offset vector register group EEW = 8,
978 * data vector register group EEW = SEW
979 */
980 { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
981 gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
982 /*
983 * offset vector register group EEW = 16,
984 * data vector register group EEW = SEW
985 */
986 { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
987 gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
988 /*
989 * offset vector register group EEW = 32,
990 * data vector register group EEW = SEW
991 */
992 { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
993 gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
994 /*
995 * offset vector register group EEW = 64,
996 * data vector register group EEW = SEW
997 */
998 { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
999 gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
1000 };
1001
1002 fn = fns[eew][s->sew];
1003
1004 uint8_t emul = vext_get_emul(s, s->sew);
1005 data = FIELD_DP32(data, VDATA, VM, a->vm);
1006 data = FIELD_DP32(data, VDATA, LMUL, emul);
1007 data = FIELD_DP32(data, VDATA, NF, a->nf);
1008 return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
1009 }
1010
1011 static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
1012 {
1013 return require_rvv(s) &&
1014 vext_check_isa_ill(s) &&
1015 vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
1016 }
1017
1018 GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
1019 GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
1020 GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
1021 GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
1022
1023 /*
1024 *** unit stride fault-only-first load
1025 */
1026 static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
1027 gen_helper_ldst_us *fn, DisasContext *s)
1028 {
1029 TCGv_ptr dest, mask;
1030 TCGv base;
1031 TCGv_i32 desc;
1032
1033 TCGLabel *over = gen_new_label();
1034 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1035
1036 dest = tcg_temp_new_ptr();
1037 mask = tcg_temp_new_ptr();
1038 base = get_gpr(s, rs1, EXT_NONE);
1039 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
1040 s->cfg_ptr->vlen / 8, data));
1041
1042 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1043 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1044
1045 fn(dest, mask, base, tcg_env, desc);
1046
1047 mark_vs_dirty(s);
1048 gen_set_label(over);
1049 return true;
1050 }
1051
1052 static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
1053 {
1054 uint32_t data = 0;
1055 gen_helper_ldst_us *fn;
1056 static gen_helper_ldst_us * const fns[4] = {
1057 gen_helper_vle8ff_v, gen_helper_vle16ff_v,
1058 gen_helper_vle32ff_v, gen_helper_vle64ff_v
1059 };
1060
1061 fn = fns[eew];
1062 if (fn == NULL) {
1063 return false;
1064 }
1065
1066 uint8_t emul = vext_get_emul(s, eew);
1067 data = FIELD_DP32(data, VDATA, VM, a->vm);
1068 data = FIELD_DP32(data, VDATA, LMUL, emul);
1069 data = FIELD_DP32(data, VDATA, NF, a->nf);
1070 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1071 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1072 return ldff_trans(a->rd, a->rs1, data, fn, s);
1073 }
1074
1075 GEN_VEXT_TRANS(vle8ff_v, MO_8, r2nfvm, ldff_op, ld_us_check)
1076 GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
1077 GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
1078 GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
1079
1080 /*
1081 * load and store whole register instructions
1082 */
1083 typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
1084
1085 static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
1086 uint32_t width, gen_helper_ldst_whole *fn,
1087 DisasContext *s, bool is_store)
1088 {
1089 uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / width;
1090 TCGLabel *over = gen_new_label();
1091 tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over);
1092
1093 TCGv_ptr dest;
1094 TCGv base;
1095 TCGv_i32 desc;
1096
1097 uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
1098 dest = tcg_temp_new_ptr();
1099 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
1100 s->cfg_ptr->vlen / 8, data));
1101
1102 base = get_gpr(s, rs1, EXT_NONE);
1103 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1104
1105 fn(dest, base, tcg_env, desc);
1106
1107 if (!is_store) {
1108 mark_vs_dirty(s);
1109 }
1110 gen_set_label(over);
1111
1112 return true;
1113 }
1114
1115 /*
1116 * Load and store whole register instructions ignore the vtype and vl settings,
1117 * so we don't need to check the vill bit. (Section 7.9)
1118 */
1119 #define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH, IS_STORE) \
1120 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
1121 { \
1122 if (require_rvv(s) && \
1123 QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
1124 return ldst_whole_trans(a->rd, a->rs1, ARG_NF, WIDTH, \
1125 gen_helper_##NAME, s, IS_STORE); \
1126 } \
1127 return false; \
1128 }
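/*
 * For example, vl2re32_v below loads two whole registers with EEW = 32, so
 * rd must be 2-register aligned and evl = (vlen / 8) * 2 / 4 elements.
 */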
1129
1130 GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1, false)
1131 GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2, false)
1132 GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4, false)
1133 GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8, false)
1134 GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1, false)
1135 GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2, false)
1136 GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4, false)
1137 GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8, false)
1138 GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1, false)
1139 GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2, false)
1140 GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4, false)
1141 GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8, false)
1142 GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1, false)
1143 GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2, false)
1144 GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4, false)
1145 GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8, false)
1146
1147 /*
1148 * The vector whole register store instructions are encoded similarly to
1149 * unmasked unit-stride store of elements with EEW=8.
1150 */
1151 GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1, true)
1152 GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1, true)
1153 GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1, true)
1154 GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true)
1155
1156 /*
1157 *** Vector Integer Arithmetic Instructions
1158 */
1159
1160 /*
1161 * MAXSZ returns the maximum vector size that can be operated on, in bytes,
1162 * which is used in GVEC IR when the vl_eq_vlmax flag is set to true
1163 * to accelerate vector operations.
1164 */
1165 static inline uint32_t MAXSZ(DisasContext *s)
1166 {
1167 int scale = s->lmul - 3;
1168 return s->cfg_ptr->vlen >> -scale;
1169 }
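/*
 * Worked example (illustrative values only): VLEN = 256 bits and LMUL = 4
 * (lmul = 2) gives scale = -1, so MAXSZ = 256 >> 1 = 128 bytes, i.e. four
 * 32-byte registers.
 */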
1170
1171 static bool opivv_check(DisasContext *s, arg_rmrr *a)
1172 {
1173 return require_rvv(s) &&
1174 vext_check_isa_ill(s) &&
1175 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1176 }
1177
1178 typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
1179 uint32_t, uint32_t, uint32_t);
1180
1181 static inline bool
1182 do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
1183 gen_helper_gvec_4_ptr *fn)
1184 {
1185 TCGLabel *over = gen_new_label();
1186
1187 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1188
1189 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1190 gvec_fn(s->sew, vreg_ofs(s, a->rd),
1191 vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
1192 MAXSZ(s), MAXSZ(s));
1193 } else {
1194 uint32_t data = 0;
1195
1196 data = FIELD_DP32(data, VDATA, VM, a->vm);
1197 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1198 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1199 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1200 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1201 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
1202 tcg_env, s->cfg_ptr->vlen / 8,
1203 s->cfg_ptr->vlen / 8, data, fn);
1204 }
1205 mark_vs_dirty(s);
1206 gen_set_label(over);
1207 return true;
1208 }
1209
1210 /* OPIVV with GVEC IR */
1211 #define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
1212 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1213 { \
1214 static gen_helper_gvec_4_ptr * const fns[4] = { \
1215 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1216 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1217 }; \
1218 if (!opivv_check(s, a)) { \
1219 return false; \
1220 } \
1221 return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1222 }
1223
1224 GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
1225 GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
1226
1227 typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
1228 TCGv_env, TCGv_i32);
1229
1230 static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
1231 gen_helper_opivx *fn, DisasContext *s)
1232 {
1233 TCGv_ptr dest, src2, mask;
1234 TCGv src1;
1235 TCGv_i32 desc;
1236 uint32_t data = 0;
1237
1238 TCGLabel *over = gen_new_label();
1239 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1240
1241 dest = tcg_temp_new_ptr();
1242 mask = tcg_temp_new_ptr();
1243 src2 = tcg_temp_new_ptr();
1244 src1 = get_gpr(s, rs1, EXT_SIGN);
1245
1246 data = FIELD_DP32(data, VDATA, VM, vm);
1247 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1248 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1249 data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
1250 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1251 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
1252 s->cfg_ptr->vlen / 8, data));
1253
1254 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1255 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
1256 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1257
1258 fn(dest, mask, src1, src2, tcg_env, desc);
1259
1260 mark_vs_dirty(s);
1261 gen_set_label(over);
1262 return true;
1263 }
1264
1265 static bool opivx_check(DisasContext *s, arg_rmrr *a)
1266 {
1267 return require_rvv(s) &&
1268 vext_check_isa_ill(s) &&
1269 vext_check_ss(s, a->rd, a->rs2, a->vm);
1270 }
1271
1272 typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
1273 uint32_t, uint32_t);
1274
1275 static inline bool
1276 do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
1277 gen_helper_opivx *fn)
1278 {
1279 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1280 TCGv_i64 src1 = tcg_temp_new_i64();
1281
1282 tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
1283 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1284 src1, MAXSZ(s), MAXSZ(s));
1285
1286 mark_vs_dirty(s);
1287 return true;
1288 }
1289 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1290 }
1291
1292 /* OPIVX with GVEC IR */
1293 #define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
1294 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1295 { \
1296 static gen_helper_opivx * const fns[4] = { \
1297 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1298 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1299 }; \
1300 if (!opivx_check(s, a)) { \
1301 return false; \
1302 } \
1303 return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1304 }
1305
1306 GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
1307 GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
1308
1309 static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1310 {
1311 tcg_gen_vec_sub8_i64(d, b, a);
1312 }
1313
1314 static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1315 {
1316 tcg_gen_vec_sub16_i64(d, b, a);
1317 }
1318
1319 static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1320 {
1321 tcg_gen_sub_i32(ret, arg2, arg1);
1322 }
1323
1324 static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1325 {
1326 tcg_gen_sub_i64(ret, arg2, arg1);
1327 }
1328
1329 static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
1330 {
1331 tcg_gen_sub_vec(vece, r, b, a);
1332 }
1333
1334 static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
1335 TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
1336 {
1337 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
1338 static const GVecGen2s rsub_op[4] = {
1339 { .fni8 = gen_vec_rsub8_i64,
1340 .fniv = gen_rsub_vec,
1341 .fno = gen_helper_vec_rsubs8,
1342 .opt_opc = vecop_list,
1343 .vece = MO_8 },
1344 { .fni8 = gen_vec_rsub16_i64,
1345 .fniv = gen_rsub_vec,
1346 .fno = gen_helper_vec_rsubs16,
1347 .opt_opc = vecop_list,
1348 .vece = MO_16 },
1349 { .fni4 = gen_rsub_i32,
1350 .fniv = gen_rsub_vec,
1351 .fno = gen_helper_vec_rsubs32,
1352 .opt_opc = vecop_list,
1353 .vece = MO_32 },
1354 { .fni8 = gen_rsub_i64,
1355 .fniv = gen_rsub_vec,
1356 .fno = gen_helper_vec_rsubs64,
1357 .opt_opc = vecop_list,
1358 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1359 .vece = MO_64 },
1360 };
1361
1362 tcg_debug_assert(vece <= MO_64);
1363 tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
1364 }
1365
1366 GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
1367
1368 typedef enum {
1369 IMM_ZX, /* Zero-extended */
1370 IMM_SX, /* Sign-extended */
1371 IMM_TRUNC_SEW, /* Truncate to log(SEW) bits */
1372 IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
1373 } imm_mode_t;
1374
1375 static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
1376 {
1377 switch (imm_mode) {
1378 case IMM_ZX:
1379 return extract64(imm, 0, 5);
1380 case IMM_SX:
1381 return sextract64(imm, 0, 5);
1382 case IMM_TRUNC_SEW:
1383 return extract64(imm, 0, s->sew + 3);
1384 case IMM_TRUNC_2SEW:
1385 return extract64(imm, 0, s->sew + 4);
1386 default:
1387 g_assert_not_reached();
1388 }
1389 }
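/*
 * For example (illustrative values only): vsll.vi with SEW = 8 uses
 * IMM_TRUNC_SEW, which keeps the low s->sew + 3 = 3 bits, so an encoded
 * shift amount of 9 is truncated to 1.
 */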
1390
1391 static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
1392 gen_helper_opivx *fn, DisasContext *s,
1393 imm_mode_t imm_mode)
1394 {
1395 TCGv_ptr dest, src2, mask;
1396 TCGv src1;
1397 TCGv_i32 desc;
1398 uint32_t data = 0;
1399
1400 TCGLabel *over = gen_new_label();
1401 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1402
1403 dest = tcg_temp_new_ptr();
1404 mask = tcg_temp_new_ptr();
1405 src2 = tcg_temp_new_ptr();
1406 src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));
1407
1408 data = FIELD_DP32(data, VDATA, VM, vm);
1409 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1410 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1411 data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
1412 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1413 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
1414 s->cfg_ptr->vlen / 8, data));
1415
1416 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1417 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
1418 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1419
1420 fn(dest, mask, src1, src2, tcg_env, desc);
1421
1422 mark_vs_dirty(s);
1423 gen_set_label(over);
1424 return true;
1425 }
1426
1427 typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1428 uint32_t, uint32_t);
1429
1430 static inline bool
1431 do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1432 gen_helper_opivx *fn, imm_mode_t imm_mode)
1433 {
1434 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1435 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1436 extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1437 mark_vs_dirty(s);
1438 return true;
1439 }
1440 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1441 }
1442
1443 /* OPIVI with GVEC IR */
1444 #define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
1445 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1446 { \
1447 static gen_helper_opivx * const fns[4] = { \
1448 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1449 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1450 }; \
1451 if (!opivx_check(s, a)) { \
1452 return false; \
1453 } \
1454 return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
1455 fns[s->sew], IMM_MODE); \
1456 }
1457
1458 GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
1459
1460 static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1461 int64_t c, uint32_t oprsz, uint32_t maxsz)
1462 {
1463 TCGv_i64 tmp = tcg_constant_i64(c);
1464 tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1465 }
1466
1467 GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
1468
1469 /* Vector Widening Integer Add/Subtract */
1470
1471 /* OPIVV with WIDEN */
1472 static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1473 {
1474 return require_rvv(s) &&
1475 vext_check_isa_ill(s) &&
1476 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1477 }
1478
1479 static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1480 gen_helper_gvec_4_ptr *fn,
1481 bool (*checkfn)(DisasContext *, arg_rmrr *))
1482 {
1483 if (checkfn(s, a)) {
1484 uint32_t data = 0;
1485 TCGLabel *over = gen_new_label();
1486 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1487
1488 data = FIELD_DP32(data, VDATA, VM, a->vm);
1489 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1490 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1491 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1492 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1493 vreg_ofs(s, a->rs1),
1494 vreg_ofs(s, a->rs2),
1495 tcg_env, s->cfg_ptr->vlen / 8,
1496 s->cfg_ptr->vlen / 8,
1497 data, fn);
1498 mark_vs_dirty(s);
1499 gen_set_label(over);
1500 return true;
1501 }
1502 return false;
1503 }
1504
1505 #define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1506 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1507 { \
1508 static gen_helper_gvec_4_ptr * const fns[3] = { \
1509 gen_helper_##NAME##_b, \
1510 gen_helper_##NAME##_h, \
1511 gen_helper_##NAME##_w \
1512 }; \
1513 return do_opivv_widen(s, a, fns[s->sew], CHECK); \
1514 }
1515
1516 GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
1517 GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
1518 GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
1519 GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
1520
1521 /* OPIVX with WIDEN */
1522 static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1523 {
1524 return require_rvv(s) &&
1525 vext_check_isa_ill(s) &&
1526 vext_check_ds(s, a->rd, a->rs2, a->vm);
1527 }
1528
1529 #define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
1530 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1531 { \
1532 if (CHECK(s, a)) { \
1533 static gen_helper_opivx * const fns[3] = { \
1534 gen_helper_##NAME##_b, \
1535 gen_helper_##NAME##_h, \
1536 gen_helper_##NAME##_w \
1537 }; \
1538 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
1539 } \
1540 return false; \
1541 }
1542
1543 GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
1544 GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
1545 GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
1546 GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)
1547
1548 /* WIDEN OPIVV with WIDEN */
1549 static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
1550 {
1551 return require_rvv(s) &&
1552 vext_check_isa_ill(s) &&
1553 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
1554 }
1555
1556 static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
1557 gen_helper_gvec_4_ptr *fn)
1558 {
1559 if (opiwv_widen_check(s, a)) {
1560 uint32_t data = 0;
1561 TCGLabel *over = gen_new_label();
1562 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1563
1564 data = FIELD_DP32(data, VDATA, VM, a->vm);
1565 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1566 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1567 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1568 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1569 vreg_ofs(s, a->rs1),
1570 vreg_ofs(s, a->rs2),
1571 tcg_env, s->cfg_ptr->vlen / 8,
1572 s->cfg_ptr->vlen / 8, data, fn);
1573 mark_vs_dirty(s);
1574 gen_set_label(over);
1575 return true;
1576 }
1577 return false;
1578 }
1579
1580 #define GEN_OPIWV_WIDEN_TRANS(NAME) \
1581 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1582 { \
1583 static gen_helper_gvec_4_ptr * const fns[3] = { \
1584 gen_helper_##NAME##_b, \
1585 gen_helper_##NAME##_h, \
1586 gen_helper_##NAME##_w \
1587 }; \
1588 return do_opiwv_widen(s, a, fns[s->sew]); \
1589 }
1590
1591 GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
1592 GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
1593 GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
1594 GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
1595
1596 /* WIDEN OPIVX with WIDEN */
1597 static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
1598 {
1599 return require_rvv(s) &&
1600 vext_check_isa_ill(s) &&
1601 vext_check_dd(s, a->rd, a->rs2, a->vm);
1602 }
1603
1604 static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
1605 gen_helper_opivx *fn)
1606 {
1607 if (opiwx_widen_check(s, a)) {
1608 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1609 }
1610 return false;
1611 }
1612
1613 #define GEN_OPIWX_WIDEN_TRANS(NAME) \
1614 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1615 { \
1616 static gen_helper_opivx * const fns[3] = { \
1617 gen_helper_##NAME##_b, \
1618 gen_helper_##NAME##_h, \
1619 gen_helper_##NAME##_w \
1620 }; \
1621 return do_opiwx_widen(s, a, fns[s->sew]); \
1622 }
1623
1624 GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
1625 GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
1626 GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
1627 GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
1628
1629 static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
1630 gen_helper_gvec_4_ptr *fn, DisasContext *s)
1631 {
1632 uint32_t data = 0;
1633 TCGLabel *over = gen_new_label();
1634 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
1635
1636 data = FIELD_DP32(data, VDATA, VM, vm);
1637 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1638 data = FIELD_DP32(data, VDATA, VTA, s->vta);
1639 data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
1640 data = FIELD_DP32(data, VDATA, VMA, s->vma);
1641 tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
1642 vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlen / 8,
1643 s->cfg_ptr->vlen / 8, data, fn);
1644 mark_vs_dirty(s);
1645 gen_set_label(over);
1646 return true;
1647 }
1648
1649 /* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
1650 /* OPIVV without GVEC IR */
1651 #define GEN_OPIVV_TRANS(NAME, CHECK) \
1652 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1653 { \
1654 if (CHECK(s, a)) { \
1655 static gen_helper_gvec_4_ptr * const fns[4] = { \
1656 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1657 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1658 }; \
1659 return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1660 } \
1661 return false; \
1662 }
1663
1664 /*
1665 * For vadc and vsbc, an illegal instruction exception is raised if the
1666 * destination vector register is v0 and LMUL > 1. (Section 11.4)
1667 */
1668 static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
1669 {
1670 return require_rvv(s) &&
1671 vext_check_isa_ill(s) &&
1672 (a->rd != 0) &&
1673 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1674 }
1675
1676 GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
1677 GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
1678
1679 /*
1680 * For vmadc and vmsbc, an illegal instruction exception is raised if the
1681 * destination vector register overlaps a source vector register group.
1682 */
1683 static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
1684 {
1685 return require_rvv(s) &&
1686 vext_check_isa_ill(s) &&
1687 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1688 }
1689
1690 GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
1691 GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
1692
1693 static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
1694 {
1695 return require_rvv(s) &&
1696 vext_check_isa_ill(s) &&
1697 (a->rd != 0) &&
1698 vext_check_ss(s, a->rd, a->rs2, a->vm);
1699 }
1700
1701 /* OPIVX without GVEC IR */
1702 #define GEN_OPIVX_TRANS(NAME, CHECK) \
1703 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1704 { \
1705 if (CHECK(s, a)) { \
1706 static gen_helper_opivx * const fns[4] = { \
1707 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1708 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1709 }; \
1710 \
1711 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1712 } \
1713 return false; \
1714 }
1715
1716 GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
1717 GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
1718
1719 static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
1720 {
1721 return require_rvv(s) &&
1722 vext_check_isa_ill(s) &&
1723 vext_check_ms(s, a->rd, a->rs2);
1724 }
1725
1726 GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
1727 GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
1728
1729 /* OPIVI without GVEC IR */
1730 #define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
1731 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1732 { \
1733 if (CHECK(s, a)) { \
1734 static gen_helper_opivx * const fns[4] = { \
1735 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1736 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1737 }; \
1738 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1739 fns[s->sew], s, IMM_MODE); \
1740 } \
1741 return false; \
1742 }
1743
1744 GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
1745 GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)
1746
1747 /* Vector Bitwise Logical Instructions */
1748 GEN_OPIVV_GVEC_TRANS(vand_vv, and)
1749 GEN_OPIVV_GVEC_TRANS(vor_vv, or)
1750 GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
1751 GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
1752 GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
1753 GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
1754 GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
1755 GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
1756 GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
1757
1758 /* Vector Single-Width Bit Shift Instructions */
1759 GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
1760 GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
1761 GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)
1762
1763 typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
1764 uint32_t, uint32_t);
1765
1766 static inline bool
1767 do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1768 gen_helper_opivx *fn)
1769 {
1770 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1771 TCGv_i32 src1 = tcg_temp_new_i32();
1772
1773 tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1774 tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1775 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1776 src1, MAXSZ(s), MAXSZ(s));
1777
1778 mark_vs_dirty(s);
1779 return true;
1780 }
1781 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1782 }
1783
1784 #define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
1785 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1786 { \
1787 static gen_helper_opivx * const fns[4] = { \
1788 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1789 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1790 }; \
1791 if (!opivx_check(s, a)) { \
1792 return false; \
1793 } \
1794 return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1795 }
1796
1797 GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
1798 GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
1799 GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
1800
1801 GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
1802 GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
1803 GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
1804
1805 /* Vector Narrowing Integer Right Shift Instructions */
1806 static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
1807 {
1808 return require_rvv(s) &&
1809 vext_check_isa_ill(s) &&
1810 vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
1811 }
1812
1813 /* OPIWV with NARROW */
1814 #define GEN_OPIWV_NARROW_TRANS(NAME) \
1815 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1816 { \
1817 if (opiwv_narrow_check(s, a)) { \
1818 uint32_t data = 0; \
1819 static gen_helper_gvec_4_ptr * const fns[3] = { \
1820 gen_helper_##NAME##_b, \
1821 gen_helper_##NAME##_h, \
1822 gen_helper_##NAME##_w, \
1823 }; \
1824 TCGLabel *over = gen_new_label(); \
1825 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
1826 \
1827 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1828 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
1829 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
1830 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
1831 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1832 vreg_ofs(s, a->rs1), \
1833 vreg_ofs(s, a->rs2), tcg_env, \
1834 s->cfg_ptr->vlen / 8, \
1835 s->cfg_ptr->vlen / 8, data, \
1836 fns[s->sew]); \
1837 mark_vs_dirty(s); \
1838 gen_set_label(over); \
1839 return true; \
1840 } \
1841 return false; \
1842 }
1843 GEN_OPIWV_NARROW_TRANS(vnsra_wv)
1844 GEN_OPIWV_NARROW_TRANS(vnsrl_wv)
1845
1846 static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
1847 {
1848 return require_rvv(s) &&
1849 vext_check_isa_ill(s) &&
1850 vext_check_sd(s, a->rd, a->rs2, a->vm);
1851 }
1852
1853 /* OPIWX with NARROW */
1854 #define GEN_OPIWX_NARROW_TRANS(NAME) \
1855 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1856 { \
1857 if (opiwx_narrow_check(s, a)) { \
1858 static gen_helper_opivx * const fns[3] = { \
1859 gen_helper_##NAME##_b, \
1860 gen_helper_##NAME##_h, \
1861 gen_helper_##NAME##_w, \
1862 }; \
1863 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1864 } \
1865 return false; \
1866 }
1867
1868 GEN_OPIWX_NARROW_TRANS(vnsra_wx)
1869 GEN_OPIWX_NARROW_TRANS(vnsrl_wx)
1870
1871 /* OPIWI with NARROW */
1872 #define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
1873 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1874 { \
1875 if (opiwx_narrow_check(s, a)) { \
1876 static gen_helper_opivx * const fns[3] = { \
1877 gen_helper_##OPIVX##_b, \
1878 gen_helper_##OPIVX##_h, \
1879 gen_helper_##OPIVX##_w, \
1880 }; \
1881 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1882 fns[s->sew], s, IMM_MODE); \
1883 } \
1884 return false; \
1885 }
1886
1887 GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
1888 GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)
1889
1890 /* Vector Integer Comparison Instructions */
1891 /*
1892 * For all comparison instructions, an illegal instruction exception is raised
1893 * if the destination vector register overlaps a source vector register group
1894 * and LMUL > 1.
1895 */
1896 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1897 {
1898 return require_rvv(s) &&
1899 vext_check_isa_ill(s) &&
1900 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1901 }
1902
1903 GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1904 GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1905 GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1906 GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1907 GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1908 GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1909
1910 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1911 {
1912 return require_rvv(s) &&
1913 vext_check_isa_ill(s) &&
1914 vext_check_ms(s, a->rd, a->rs2);
1915 }
1916
1917 GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1918 GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1919 GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1920 GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1921 GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1922 GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1923 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1924 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1925
1926 GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
1927 GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
1928 GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
1929 GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
1930 GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
1931 GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
1932
1933 /* Vector Integer Min/Max Instructions */
1934 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1935 GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
1936 GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1937 GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
1938 GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1939 GEN_OPIVX_TRANS(vmin_vx, opivx_check)
1940 GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1941 GEN_OPIVX_TRANS(vmax_vx, opivx_check)
1942
1943 /* Vector Single-Width Integer Multiply Instructions */
1944
1945 static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
1946 {
1947 /*
1948 * All Zve* extensions support all vector integer instructions,
1949 * except that the vmulh integer multiply variants
1950 * that return the high word of the product
1951 * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
1952 * are not included for EEW=64 in Zve64*. (Section 18.2)
1953 */
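/* i.e. EEW=64 is allowed only when the full V extension is present */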
1954 return opivv_check(s, a) &&
1955 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
1956 }
1957
1958 static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
1959 {
1960 /*
1961 * All Zve* extensions support all vector integer instructions,
1962 * except that the vmulh integer multiply variants
1963 * that return the high word of the product
1964 * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
1965 * are not included for EEW=64 in Zve64*. (Section 18.2)
1966 */
1967 return opivx_check(s, a) &&
1968 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
1969 }
1970
1971 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
1972 GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
1973 GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
1974 GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
1975 GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
1976 GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
1977 GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
1978 GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)
1979
1980 /* Vector Integer Divide Instructions */
1981 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
1982 GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
1983 GEN_OPIVV_TRANS(vremu_vv, opivv_check)
1984 GEN_OPIVV_TRANS(vrem_vv, opivv_check)
1985 GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
1986 GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
1987 GEN_OPIVX_TRANS(vremu_vx, opivx_check)
1988 GEN_OPIVX_TRANS(vrem_vx, opivx_check)
1989
1990 /* Vector Widening Integer Multiply Instructions */
1991 GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
1992 GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
1993 GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
1994 GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
1995 GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
1996 GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)
1997
1998 /* Vector Single-Width Integer Multiply-Add Instructions */
1999 GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
2000 GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
2001 GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
2002 GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
2003 GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
2004 GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
2005 GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
2006 GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
2007
2008 /* Vector Widening Integer Multiply-Add Instructions */
2009 GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
2010 GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
2011 GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
2012 GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
2013 GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
2014 GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
2015 GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
2016
2017 /* Vector Integer Merge and Move Instructions */
2018 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
2019 {
2020 if (require_rvv(s) &&
2021 vext_check_isa_ill(s) &&
2022 /* vmv.v.v has rs2 = 0 and vm = 1 */
2023 vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
2024 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2025 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
2026 vreg_ofs(s, a->rs1),
2027 MAXSZ(s), MAXSZ(s));
2028 } else {
2029 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2030 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2031 static gen_helper_gvec_2_ptr * const fns[4] = {
2032 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
2033 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
2034 };
2035 TCGLabel *over = gen_new_label();
2036 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2037
2038 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
2039 tcg_env, s->cfg_ptr->vlen / 8,
2040 s->cfg_ptr->vlen / 8, data,
2041 fns[s->sew]);
2042 gen_set_label(over);
2043 }
2044 mark_vs_dirty(s);
2045 return true;
2046 }
2047 return false;
2048 }
2049
2050 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
2051 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
2052 {
2053 if (require_rvv(s) &&
2054 vext_check_isa_ill(s) &&
2055 /* vmv.v.x has rs2 = 0 and vm = 1 */
2056 vext_check_ss(s, a->rd, 0, 1)) {
2057 TCGv s1;
2058 TCGLabel *over = gen_new_label();
2059 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2060
2061 s1 = get_gpr(s, a->rs1, EXT_SIGN);
2062
2063 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2064 if (get_xl(s) == MXL_RV32 && s->sew == MO_64) {
2065 TCGv_i64 s1_i64 = tcg_temp_new_i64();
2066 tcg_gen_ext_tl_i64(s1_i64, s1);
2067 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2068 MAXSZ(s), MAXSZ(s), s1_i64);
2069 } else {
2070 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
2071 MAXSZ(s), MAXSZ(s), s1);
2072 }
2073 } else {
2074 TCGv_i32 desc;
2075 TCGv_i64 s1_i64 = tcg_temp_new_i64();
2076 TCGv_ptr dest = tcg_temp_new_ptr();
2077 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2078 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2079 static gen_helper_vmv_vx * const fns[4] = {
2080 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
2081 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
2082 };
2083
2084 tcg_gen_ext_tl_i64(s1_i64, s1);
2085 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2086 s->cfg_ptr->vlen / 8, data));
2087 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2088 fns[s->sew](dest, s1_i64, tcg_env, desc);
2089 }
2090
2091 mark_vs_dirty(s);
2092 gen_set_label(over);
2093 return true;
2094 }
2095 return false;
2096 }
2097
2098 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
2099 {
2100 if (require_rvv(s) &&
2101 vext_check_isa_ill(s) &&
2102 /* vmv.v.i has rs2 = 0 and vm = 1 */
2103 vext_check_ss(s, a->rd, 0, 1)) {
2104 int64_t simm = sextract64(a->rs1, 0, 5);
2105 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2106 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
2107 MAXSZ(s), MAXSZ(s), simm);
2108 mark_vs_dirty(s);
2109 } else {
2110 TCGv_i32 desc;
2111 TCGv_i64 s1;
2112 TCGv_ptr dest;
2113 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2114 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2115 static gen_helper_vmv_vx * const fns[4] = {
2116 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
2117 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
2118 };
2119 TCGLabel *over = gen_new_label();
2120 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2121
2122 s1 = tcg_constant_i64(simm);
2123 dest = tcg_temp_new_ptr();
2124 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2125 s->cfg_ptr->vlen / 8, data));
2126 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2127 fns[s->sew](dest, s1, tcg_env, desc);
2128
2129 mark_vs_dirty(s);
2130 gen_set_label(over);
2131 }
2132 return true;
2133 }
2134 return false;
2135 }
2136
2137 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
2138 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
2139 GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
2140
2141 /*
2142 *** Vector Fixed-Point Arithmetic Instructions
2143 */
2144
2145 /* Vector Single-Width Saturating Add and Subtract */
2146 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
2147 GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
2148 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
2149 GEN_OPIVV_TRANS(vssub_vv, opivv_check)
2150 GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
2151 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
2152 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
2153 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
2154 GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
2155 GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
2156
2157 /* Vector Single-Width Averaging Add and Subtract */
2158 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
2159 GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
2160 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
2161 GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
2162 GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
2163 GEN_OPIVX_TRANS(vaaddu_vx, opivx_check)
2164 GEN_OPIVX_TRANS(vasub_vx, opivx_check)
2165 GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
2166
2167 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2168
2169 static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
2170 {
2171 /*
2172 * All Zve* extensions support all vector fixed-point arithmetic
2173 * instructions, except that vsmul.vv and vsmul.vx are not supported
2174 * for EEW=64 in Zve64*. (Section 18.2)
2175 */
2176 return opivv_check(s, a) &&
2177 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
2178 }
2179
2180 static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
2181 {
2182 /*
2183 * All Zve* extensions support all vector fixed-point arithmetic
2184 * instructions, except that vsmul.vv and vsmul.vx are not supported
2185 * for EEW=64 in Zve64*. (Section 18.2)
2186 */
2187 return opivx_check(s, a) &&
2188 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
2189 }
2190
2191 GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
2192 GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check)
2193
2194 /* Vector Single-Width Scaling Shift Instructions */
2195 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
2196 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
2197 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
2198 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
2199 GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
2200 GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)
2201
2202 /* Vector Narrowing Fixed-Point Clip Instructions */
2203 GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
2204 GEN_OPIWV_NARROW_TRANS(vnclip_wv)
2205 GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
2206 GEN_OPIWX_NARROW_TRANS(vnclip_wx)
2207 GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
2208 GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)
2209
2210 /*
2211 *** Vector Floating-Point Arithmetic Instructions
2212 */
2213
2214 /*
2215 * As RVF-only CPUs always have values NaN-boxed to 64 bits,
2216 * RVF and RVD can be treated equally.
2217 * We do not have to deal with the case of SEW > FLEN.
2218 *
2219 * If SEW < FLEN, check whether the input fp register holds a valid
2220 * NaN-boxed value, in which case the least-significant SEW bits
2221 * of the f register are used; otherwise the canonical NaN value is used.
2222 */
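/*
 * For example, a half-precision (SEW=16) operand is considered
 * properly NaN-boxed only when bits 63:16 of the f register are all
 * ones; otherwise the canonical NaN of that format is substituted.
 */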
2223 static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
2224 {
2225 switch (s->sew) {
2226 case 1:
2227 gen_check_nanbox_h(out, in);
2228 break;
2229 case 2:
2230 gen_check_nanbox_s(out, in);
2231 break;
2232 case 3:
2233 tcg_gen_mov_i64(out, in);
2234 break;
2235 default:
2236 g_assert_not_reached();
2237 }
2238 }
2239
2240 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2241
2242 /*
2243 * If the current SEW does not correspond to a supported IEEE floating-point
2244 * type, an illegal instruction exception is raised.
2245 */
2246 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2247 {
2248 return require_rvv(s) &&
2249 require_rvf(s) &&
2250 vext_check_isa_ill(s) &&
2251 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2252 }
2253
2254 /* OPFVV without GVEC IR */
2255 #define GEN_OPFVV_TRANS(NAME, CHECK) \
2256 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2257 { \
2258 if (CHECK(s, a)) { \
2259 uint32_t data = 0; \
2260 static gen_helper_gvec_4_ptr * const fns[3] = { \
2261 gen_helper_##NAME##_h, \
2262 gen_helper_##NAME##_w, \
2263 gen_helper_##NAME##_d, \
2264 }; \
2265 TCGLabel *over = gen_new_label(); \
2266 gen_set_rm(s, RISCV_FRM_DYN); \
2267 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2268 \
2269 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2270 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2271 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2272 data = \
2273 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
2274 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2275 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2276 vreg_ofs(s, a->rs1), \
2277 vreg_ofs(s, a->rs2), tcg_env, \
2278 s->cfg_ptr->vlen / 8, \
2279 s->cfg_ptr->vlen / 8, data, \
2280 fns[s->sew - 1]); \
2281 mark_vs_dirty(s); \
2282 gen_set_label(over); \
2283 return true; \
2284 } \
2285 return false; \
2286 }
2287 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
2288 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
2289
2290 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
2291 TCGv_env, TCGv_i32);
2292
2293 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
2294 uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
2295 {
2296 TCGv_ptr dest, src2, mask;
2297 TCGv_i32 desc;
2298 TCGv_i64 t1;
2299
2300 TCGLabel *over = gen_new_label();
2301 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2302
2303 dest = tcg_temp_new_ptr();
2304 mask = tcg_temp_new_ptr();
2305 src2 = tcg_temp_new_ptr();
2306 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2307 s->cfg_ptr->vlen / 8, data));
2308
2309 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
2310 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
2311 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
2312
2313 /* NaN-box f[rs1] */
2314 t1 = tcg_temp_new_i64();
2315 do_nanbox(s, t1, cpu_fpr[rs1]);
2316
2317 fn(dest, mask, t1, src2, tcg_env, desc);
2318
2319 mark_vs_dirty(s);
2320 gen_set_label(over);
2321 return true;
2322 }
2323
2324 /*
2325 * If the current SEW does not correspond to a supported IEEE floating-point
2326 * type, an illegal instruction exception is raised.
2327 */
2328 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2329 {
2330 return require_rvv(s) &&
2331 require_rvf(s) &&
2332 vext_check_isa_ill(s) &&
2333 vext_check_ss(s, a->rd, a->rs2, a->vm);
2334 }
2335
2336 /* OPFVF without GVEC IR */
2337 #define GEN_OPFVF_TRANS(NAME, CHECK) \
2338 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2339 { \
2340 if (CHECK(s, a)) { \
2341 uint32_t data = 0; \
2342 static gen_helper_opfvf *const fns[3] = { \
2343 gen_helper_##NAME##_h, \
2344 gen_helper_##NAME##_w, \
2345 gen_helper_##NAME##_d, \
2346 }; \
2347 gen_set_rm(s, RISCV_FRM_DYN); \
2348 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2349 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2350 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2351 data = FIELD_DP32(data, VDATA, VTA_ALL_1S, \
2352 s->cfg_vta_all_1s); \
2353 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2354 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2355 fns[s->sew - 1], s); \
2356 } \
2357 return false; \
2358 }
2359
2360 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
2361 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
2362 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
2363
2364 /* Vector Widening Floating-Point Add/Subtract Instructions */
2365 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2366 {
2367 return require_rvv(s) &&
2368 require_scale_rvf(s) &&
2369 (s->sew != MO_8) &&
2370 vext_check_isa_ill(s) &&
2371 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2372 }
2373
2374 /* OPFVV with WIDEN */
2375 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
2376 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2377 { \
2378 if (CHECK(s, a)) { \
2379 uint32_t data = 0; \
2380 static gen_helper_gvec_4_ptr * const fns[2] = { \
2381 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2382 }; \
2383 TCGLabel *over = gen_new_label(); \
2384 gen_set_rm(s, RISCV_FRM_DYN); \
2385 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\
2386 \
2387 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2388 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2389 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2390 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2391 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2392 vreg_ofs(s, a->rs1), \
2393 vreg_ofs(s, a->rs2), tcg_env, \
2394 s->cfg_ptr->vlen / 8, \
2395 s->cfg_ptr->vlen / 8, data, \
2396 fns[s->sew - 1]); \
2397 mark_vs_dirty(s); \
2398 gen_set_label(over); \
2399 return true; \
2400 } \
2401 return false; \
2402 }
2403
2404 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
2405 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
2406
2407 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2408 {
2409 return require_rvv(s) &&
2410 require_scale_rvf(s) &&
2411 (s->sew != MO_8) &&
2412 vext_check_isa_ill(s) &&
2413 vext_check_ds(s, a->rd, a->rs2, a->vm);
2414 }
2415
2416 /* OPFVF with WIDEN */
2417 #define GEN_OPFVF_WIDEN_TRANS(NAME) \
2418 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2419 { \
2420 if (opfvf_widen_check(s, a)) { \
2421 uint32_t data = 0; \
2422 static gen_helper_opfvf *const fns[2] = { \
2423 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2424 }; \
2425 gen_set_rm(s, RISCV_FRM_DYN); \
2426 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2427 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2428 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2429 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2430 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2431 fns[s->sew - 1], s); \
2432 } \
2433 return false; \
2434 }
2435
2436 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
2437 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
2438
2439 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2440 {
2441 return require_rvv(s) &&
2442 require_scale_rvf(s) &&
2443 (s->sew != MO_8) &&
2444 vext_check_isa_ill(s) &&
2445 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2446 }
2447
2448 /* OPFWV with WIDEN */
2449 #define GEN_OPFWV_WIDEN_TRANS(NAME) \
2450 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2451 { \
2452 if (opfwv_widen_check(s, a)) { \
2453 uint32_t data = 0; \
2454 static gen_helper_gvec_4_ptr * const fns[2] = { \
2455 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2456 }; \
2457 TCGLabel *over = gen_new_label(); \
2458 gen_set_rm(s, RISCV_FRM_DYN); \
2459 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2460 \
2461 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2462 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2463 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2464 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2465 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2466 vreg_ofs(s, a->rs1), \
2467 vreg_ofs(s, a->rs2), tcg_env, \
2468 s->cfg_ptr->vlen / 8, \
2469 s->cfg_ptr->vlen / 8, data, \
2470 fns[s->sew - 1]); \
2471 mark_vs_dirty(s); \
2472 gen_set_label(over); \
2473 return true; \
2474 } \
2475 return false; \
2476 }
2477
2478 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2479 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2480
2481 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2482 {
2483 return require_rvv(s) &&
2484 require_scale_rvf(s) &&
2485 (s->sew != MO_8) &&
2486 vext_check_isa_ill(s) &&
2487 vext_check_dd(s, a->rd, a->rs2, a->vm);
2488 }
2489
2490 /* OPFWF with WIDEN */
2491 #define GEN_OPFWF_WIDEN_TRANS(NAME) \
2492 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2493 { \
2494 if (opfwf_widen_check(s, a)) { \
2495 uint32_t data = 0; \
2496 static gen_helper_opfvf *const fns[2] = { \
2497 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2498 }; \
2499 gen_set_rm(s, RISCV_FRM_DYN); \
2500 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2501 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2502 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2503 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2504 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2505 fns[s->sew - 1], s); \
2506 } \
2507 return false; \
2508 }
2509
2510 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2511 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2512
2513 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2514 GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2515 GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2516 GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
2517 GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
2518 GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
2519
2520 /* Vector Widening Floating-Point Multiply */
2521 GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2522 GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2523
2524 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2525 GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2526 GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2527 GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2528 GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2529 GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2530 GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2531 GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2532 GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2533 GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2534 GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2535 GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2536 GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2537 GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2538 GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2539 GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2540 GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2541
2542 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2543 GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2544 GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2545 GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2546 GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2547 GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2548 GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2549 GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2550 GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2551
2552 /* Vector Floating-Point Square-Root Instruction */
2553
2554 /*
2555 * If the current SEW does not correspond to a supported IEEE floating-point
2556 * type, an illegal instruction exception is raised.
2557 */
2558 static bool opfv_check(DisasContext *s, arg_rmr *a)
2559 {
2560 return require_rvv(s) &&
2561 require_rvf(s) &&
2562 vext_check_isa_ill(s) &&
2563 /* OPFV instructions ignore vs1 check */
2564 vext_check_ss(s, a->rd, a->rs2, a->vm);
2565 }
2566
2567 static bool do_opfv(DisasContext *s, arg_rmr *a,
2568 gen_helper_gvec_3_ptr *fn,
2569 bool (*checkfn)(DisasContext *, arg_rmr *),
2570 int rm)
2571 {
2572 if (checkfn(s, a)) {
2573 uint32_t data = 0;
2574 TCGLabel *over = gen_new_label();
2575 gen_set_rm_chkfrm(s, rm);
2576 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2577
2578 data = FIELD_DP32(data, VDATA, VM, a->vm);
2579 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2580 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2581 data = FIELD_DP32(data, VDATA, VMA, s->vma);
2582 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2583 vreg_ofs(s, a->rs2), tcg_env,
2584 s->cfg_ptr->vlen / 8,
2585 s->cfg_ptr->vlen / 8, data, fn);
2586 mark_vs_dirty(s);
2587 gen_set_label(over);
2588 return true;
2589 }
2590 return false;
2591 }
2592
2593 #define GEN_OPFV_TRANS(NAME, CHECK, FRM) \
2594 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2595 { \
2596 static gen_helper_gvec_3_ptr * const fns[3] = { \
2597 gen_helper_##NAME##_h, \
2598 gen_helper_##NAME##_w, \
2599 gen_helper_##NAME##_d \
2600 }; \
2601 return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
2602 }
2603
2604 GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
2605 GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
2606 GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN)
2607
2608 /* Vector Floating-Point MIN/MAX Instructions */
2609 GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2610 GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2611 GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2612 GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2613
2614 /* Vector Floating-Point Sign-Injection Instructions */
2615 GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2616 GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2617 GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2618 GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2619 GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2620 GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2621
2622 /* Vector Floating-Point Compare Instructions */
2623 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2624 {
2625 return require_rvv(s) &&
2626 require_rvf(s) &&
2627 vext_check_isa_ill(s) &&
2628 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2629 }
2630
2631 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2632 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2633 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2634 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2635
2636 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2637 {
2638 return require_rvv(s) &&
2639 require_rvf(s) &&
2640 vext_check_isa_ill(s) &&
2641 vext_check_ms(s, a->rd, a->rs2);
2642 }
2643
2644 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2645 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2646 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2647 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2648 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2649 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2650
2651 /* Vector Floating-Point Classify Instruction */
2652 GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)
2653
2654 /* Vector Floating-Point Merge Instruction */
2655 GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
2656
2657 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2658 {
2659 if (require_rvv(s) &&
2660 require_rvf(s) &&
2661 vext_check_isa_ill(s) &&
2662 require_align(a->rd, s->lmul)) {
2663 gen_set_rm(s, RISCV_FRM_DYN);
2664
2665 TCGv_i64 t1;
2666
2667 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2668 t1 = tcg_temp_new_i64();
2669 /* NaN-box f[rs1] */
2670 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2671
2672 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2673 MAXSZ(s), MAXSZ(s), t1);
2674 mark_vs_dirty(s);
2675 } else {
2676 TCGv_ptr dest;
2677 TCGv_i32 desc;
2678 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2679 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2680 data = FIELD_DP32(data, VDATA, VMA, s->vma);
2681 static gen_helper_vmv_vx * const fns[3] = {
2682 gen_helper_vmv_v_x_h,
2683 gen_helper_vmv_v_x_w,
2684 gen_helper_vmv_v_x_d,
2685 };
2686 TCGLabel *over = gen_new_label();
2687 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2688
2689 t1 = tcg_temp_new_i64();
2690 /* NaN-box f[rs1] */
2691 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2692
2693 dest = tcg_temp_new_ptr();
2694 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2695 s->cfg_ptr->vlen / 8, data));
2696 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2697
2698 fns[s->sew - 1](dest, t1, tcg_env, desc);
2699
2700 mark_vs_dirty(s);
2701 gen_set_label(over);
2702 }
2703 return true;
2704 }
2705 return false;
2706 }
2707
2708 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2709 #define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM) \
2710 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2711 { \
2712 static gen_helper_gvec_3_ptr * const fns[3] = { \
2713 gen_helper_##HELPER##_h, \
2714 gen_helper_##HELPER##_w, \
2715 gen_helper_##HELPER##_d \
2716 }; \
2717 return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
2718 }
2719
2720 GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
2721 GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
2722 GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
2723 GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
2724 /* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
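/* The _rtz_ variants differ only in statically selecting the RTZ rounding mode. */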
2725 GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
2726 GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
2727
2728 /* Widening Floating-Point/Integer Type-Convert Instructions */
2729
2730 /*
2731 * If the current SEW does not correspond to a supported IEEE floating-point
2732 * type, an illegal instruction exception is raised.
2733 */
2734 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2735 {
2736 return require_rvv(s) &&
2737 vext_check_isa_ill(s) &&
2738 vext_check_ds(s, a->rd, a->rs2, a->vm);
2739 }
2740
2741 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
2742 {
2743 return opfv_widen_check(s, a) &&
2744 require_rvf(s);
2745 }
2746
2747 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
2748 {
2749 return opfv_widen_check(s, a) &&
2750 require_scale_rvfmin(s) &&
2751 (s->sew != MO_8);
2752 }
2753
2754 #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
2755 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2756 { \
2757 if (CHECK(s, a)) { \
2758 uint32_t data = 0; \
2759 static gen_helper_gvec_3_ptr * const fns[2] = { \
2760 gen_helper_##HELPER##_h, \
2761 gen_helper_##HELPER##_w, \
2762 }; \
2763 TCGLabel *over = gen_new_label(); \
2764 gen_set_rm_chkfrm(s, FRM); \
2765 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2766 \
2767 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2768 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2769 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2770 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2771 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2772 vreg_ofs(s, a->rs2), tcg_env, \
2773 s->cfg_ptr->vlen / 8, \
2774 s->cfg_ptr->vlen / 8, data, \
2775 fns[s->sew - 1]); \
2776 mark_vs_dirty(s); \
2777 gen_set_label(over); \
2778 return true; \
2779 } \
2780 return false; \
2781 }
2782
2783 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
2784 RISCV_FRM_DYN)
2785 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
2786 RISCV_FRM_DYN)
2787 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
2788 RISCV_FRM_DYN)
2789 /* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
2790 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
2791 RISCV_FRM_RTZ)
2792 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
2793 RISCV_FRM_RTZ)
2794
2795 static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
2796 {
2797 return require_rvv(s) &&
2798 require_scale_rvf(s) &&
2799 vext_check_isa_ill(s) &&
2800 /* OPFV widening instructions ignore vs1 check */
2801 vext_check_ds(s, a->rd, a->rs2, a->vm);
2802 }
2803
2804 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
2805 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2806 { \
2807 if (opfxv_widen_check(s, a)) { \
2808 uint32_t data = 0; \
2809 static gen_helper_gvec_3_ptr * const fns[3] = { \
2810 gen_helper_##NAME##_b, \
2811 gen_helper_##NAME##_h, \
2812 gen_helper_##NAME##_w, \
2813 }; \
2814 TCGLabel *over = gen_new_label(); \
2815 gen_set_rm(s, RISCV_FRM_DYN); \
2816 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2817 \
2818 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2819 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2820 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2821 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2822 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2823 vreg_ofs(s, a->rs2), tcg_env, \
2824 s->cfg_ptr->vlen / 8, \
2825 s->cfg_ptr->vlen / 8, data, \
2826 fns[s->sew]); \
2827 mark_vs_dirty(s); \
2828 gen_set_label(over); \
2829 return true; \
2830 } \
2831 return false; \
2832 }
2833
2834 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
2835 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)
2836
2837 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2838
2839 /*
2840 * If the current SEW does not correspond to a supported IEEE floating-point
2841 * type, an illegal instruction exception is raised.
2842 */
2843 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2844 {
2845 return require_rvv(s) &&
2846 vext_check_isa_ill(s) &&
2847 /* OPFV narrowing instructions ignore vs1 check */
2848 vext_check_sd(s, a->rd, a->rs2, a->vm);
2849 }
2850
2851 static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
2852 {
2853 return opfv_narrow_check(s, a) &&
2854 require_rvf(s) &&
2855 (s->sew != MO_64);
2856 }
2857
2858 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
2859 {
2860 return opfv_narrow_check(s, a) &&
2861 require_scale_rvfmin(s) &&
2862 (s->sew != MO_8);
2863 }
2864
2865 static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
2866 {
2867 return opfv_narrow_check(s, a) &&
2868 require_scale_rvf(s) &&
2869 (s->sew != MO_8);
2870 }
2871
2872 #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
2873 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2874 { \
2875 if (CHECK(s, a)) { \
2876 uint32_t data = 0; \
2877 static gen_helper_gvec_3_ptr * const fns[2] = { \
2878 gen_helper_##HELPER##_h, \
2879 gen_helper_##HELPER##_w, \
2880 }; \
2881 TCGLabel *over = gen_new_label(); \
2882 gen_set_rm_chkfrm(s, FRM); \
2883 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2884 \
2885 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2886 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2887 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2888 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2889 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2890 vreg_ofs(s, a->rs2), tcg_env, \
2891 s->cfg_ptr->vlen / 8, \
2892 s->cfg_ptr->vlen / 8, data, \
2893 fns[s->sew - 1]); \
2894 mark_vs_dirty(s); \
2895 gen_set_label(over); \
2896 return true; \
2897 } \
2898 return false; \
2899 }
2900
2901 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
2902 RISCV_FRM_DYN)
2903 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
2904 RISCV_FRM_DYN)
2905 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
2906 RISCV_FRM_DYN)
2907 /* Reuse the helper function from vfncvt.f.f.w */
2908 GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
2909 RISCV_FRM_ROD)
2910
2911 static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
2912 {
2913 return require_rvv(s) &&
2914 require_scale_rvf(s) &&
2915 vext_check_isa_ill(s) &&
2916 /* OPFV narrowing instructions ignore vs1 check */
2917 vext_check_sd(s, a->rd, a->rs2, a->vm);
2918 }
2919
2920 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
2921 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2922 { \
2923 if (opxfv_narrow_check(s, a)) { \
2924 uint32_t data = 0; \
2925 static gen_helper_gvec_3_ptr * const fns[3] = { \
2926 gen_helper_##HELPER##_b, \
2927 gen_helper_##HELPER##_h, \
2928 gen_helper_##HELPER##_w, \
2929 }; \
2930 TCGLabel *over = gen_new_label(); \
2931 gen_set_rm_chkfrm(s, FRM); \
2932 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2933 \
2934 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2935 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2936 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2937 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2938 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2939 vreg_ofs(s, a->rs2), tcg_env, \
2940 s->cfg_ptr->vlen / 8, \
2941 s->cfg_ptr->vlen / 8, data, \
2942 fns[s->sew]); \
2943 mark_vs_dirty(s); \
2944 gen_set_label(over); \
2945 return true; \
2946 } \
2947 return false; \
2948 }
2949
2950 GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
2951 GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
2952 /* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
2953 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
2954 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)
2955
2956 /*
2957 *** Vector Reduction Operations
2958 */
2959 /* Vector Single-Width Integer Reduction Instructions */
2960 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2961 {
2962 return require_rvv(s) &&
2963 vext_check_isa_ill(s) &&
2964 vext_check_reduction(s, a->rs2);
2965 }
2966
2967 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2968 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2969 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2970 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2971 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2972 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2973 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2974 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2975
2976 /* Vector Widening Integer Reduction Instructions */
2977 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
2978 {
2979 return reduction_check(s, a) && (s->sew < MO_64) &&
2980 ((s->sew + 1) <= (s->cfg_ptr->elen >> 4));
2981 }
2982
2983 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
2984 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
2985
2986 /* Vector Single-Width Floating-Point Reduction Instructions */
2987 static bool freduction_check(DisasContext *s, arg_rmrr *a)
2988 {
2989 return reduction_check(s, a) &&
2990 require_rvf(s);
2991 }
2992
2993 GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
2994 GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
2995 GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
2996 GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
2997
2998 /* Vector Widening Floating-Point Reduction Instructions */
2999 static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
3000 {
3001 return reduction_widen_check(s, a) &&
3002 require_scale_rvf(s) &&
3003 (s->sew != MO_8);
3004 }
3005
3006 GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
3007 GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
3008
3009 /*
3010 *** Vector Mask Operations
3011 */
3012
3013 /* Vector Mask-Register Logical Instructions */
3014 #define GEN_MM_TRANS(NAME) \
3015 static bool trans_##NAME(DisasContext *s, arg_r *a) \
3016 { \
3017 if (require_rvv(s) && \
3018 vext_check_isa_ill(s)) { \
3019 uint32_t data = 0; \
3020 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
3021 TCGLabel *over = gen_new_label(); \
3022 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
3023 \
3024 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
3025 data = \
3026 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
3027 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
3028 vreg_ofs(s, a->rs1), \
3029 vreg_ofs(s, a->rs2), tcg_env, \
3030 s->cfg_ptr->vlen / 8, \
3031 s->cfg_ptr->vlen / 8, data, fn); \
3032 mark_vs_dirty(s); \
3033 gen_set_label(over); \
3034 return true; \
3035 } \
3036 return false; \
3037 }
3038
3039 GEN_MM_TRANS(vmand_mm)
3040 GEN_MM_TRANS(vmnand_mm)
3041 GEN_MM_TRANS(vmandn_mm)
3042 GEN_MM_TRANS(vmxor_mm)
3043 GEN_MM_TRANS(vmor_mm)
3044 GEN_MM_TRANS(vmnor_mm)
3045 GEN_MM_TRANS(vmorn_mm)
3046 GEN_MM_TRANS(vmxnor_mm)
3047
3048 /* Vector count population in mask (vcpop.m) */
3049 static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
3050 {
3051 if (require_rvv(s) &&
3052 vext_check_isa_ill(s) &&
3053 s->vstart_eq_zero) {
3054 TCGv_ptr src2, mask;
3055 TCGv dst;
3056 TCGv_i32 desc;
3057 uint32_t data = 0;
3058 data = FIELD_DP32(data, VDATA, VM, a->vm);
3059 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3060
3061 mask = tcg_temp_new_ptr();
3062 src2 = tcg_temp_new_ptr();
3063 dst = dest_gpr(s, a->rd);
3064 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
3065 s->cfg_ptr->vlen / 8, data));
3066
3067 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
3068 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
3069
3070 gen_helper_vcpop_m(dst, mask, src2, tcg_env, desc);
3071 gen_set_gpr(s, a->rd, dst);
3072 return true;
3073 }
3074 return false;
3075 }
3076
3077 /* vfirst.m find-first-set mask bit */
3078 static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
3079 {
3080 if (require_rvv(s) &&
3081 vext_check_isa_ill(s) &&
3082 s->vstart_eq_zero) {
3083 TCGv_ptr src2, mask;
3084 TCGv dst;
3085 TCGv_i32 desc;
3086 uint32_t data = 0;
3087 data = FIELD_DP32(data, VDATA, VM, a->vm);
3088 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3089
3090 mask = tcg_temp_new_ptr();
3091 src2 = tcg_temp_new_ptr();
3092 dst = dest_gpr(s, a->rd);
3093 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
3094 s->cfg_ptr->vlen / 8, data));
3095
3096 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
3097 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
3098
3099 gen_helper_vfirst_m(dst, mask, src2, tcg_env, desc);
3100 gen_set_gpr(s, a->rd, dst);
3101 return true;
3102 }
3103 return false;
3104 }
3105
3106 /*
3107 * vmsbf.m set-before-first mask bit
3108 * vmsif.m set-including-first mask bit
3109 * vmsof.m set-only-first mask bit
3110 */
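/*
 * For example, if the source mask is ...01000 (first set bit at
 * element 3), vmsbf produces ...00111, vmsif produces ...01111,
 * and vmsof produces ...01000.
 */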
3111 #define GEN_M_TRANS(NAME) \
3112 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3113 { \
3114 if (require_rvv(s) && \
3115 vext_check_isa_ill(s) && \
3116 require_vm(a->vm, a->rd) && \
3117 (a->rd != a->rs2) && \
3118 s->vstart_eq_zero) { \
3119 uint32_t data = 0; \
3120 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
3121 TCGLabel *over = gen_new_label(); \
3122 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
3123 \
3124 data = FIELD_DP32(data, VDATA, VM, a->vm); \
3125 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
3126 data = \
3127 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
3128 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
3129 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
3130 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
3131 tcg_env, s->cfg_ptr->vlen / 8, \
3132 s->cfg_ptr->vlen / 8, \
3133 data, fn); \
3134 mark_vs_dirty(s); \
3135 gen_set_label(over); \
3136 return true; \
3137 } \
3138 return false; \
3139 }
3140
3141 GEN_M_TRANS(vmsbf_m)
3142 GEN_M_TRANS(vmsif_m)
3143 GEN_M_TRANS(vmsof_m)
3144
3145 /*
3146 * Vector Iota Instruction
3147 *
3148 * 1. The destination register cannot overlap the source register.
3149 * 2. If masked, cannot overlap the mask register ('v0').
3150 * 3. An illegal instruction exception is raised if vstart is non-zero.
3151 */
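/*
 * viota.m writes to each element i of vd the count of set mask bits
 * among elements 0..i-1 of vs2; e.g. a source mask of 1,0,0,1,0
 * (element 0 first) yields 0,1,1,1,2.
 */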
3152 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
3153 {
3154 if (require_rvv(s) &&
3155 vext_check_isa_ill(s) &&
3156 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
3157 require_vm(a->vm, a->rd) &&
3158 require_align(a->rd, s->lmul) &&
3159 s->vstart_eq_zero) {
3160 uint32_t data = 0;
3161 TCGLabel *over = gen_new_label();
3162 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3163
3164 data = FIELD_DP32(data, VDATA, VM, a->vm);
3165 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3166 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3167 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3168 static gen_helper_gvec_3_ptr * const fns[4] = {
3169 gen_helper_viota_m_b, gen_helper_viota_m_h,
3170 gen_helper_viota_m_w, gen_helper_viota_m_d,
3171 };
3172 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3173 vreg_ofs(s, a->rs2), tcg_env,
3174 s->cfg_ptr->vlen / 8,
3175 s->cfg_ptr->vlen / 8, data, fns[s->sew]);
3176 mark_vs_dirty(s);
3177 gen_set_label(over);
3178 return true;
3179 }
3180 return false;
3181 }
3182
3183 /* Vector Element Index Instruction */
3184 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
3185 {
3186 if (require_rvv(s) &&
3187 vext_check_isa_ill(s) &&
3188 require_align(a->rd, s->lmul) &&
3189 require_vm(a->vm, a->rd)) {
3190 uint32_t data = 0;
3191 TCGLabel *over = gen_new_label();
3192 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3193
3194 data = FIELD_DP32(data, VDATA, VM, a->vm);
3195 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3196 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3197 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3198 static gen_helper_gvec_2_ptr * const fns[4] = {
3199 gen_helper_vid_v_b, gen_helper_vid_v_h,
3200 gen_helper_vid_v_w, gen_helper_vid_v_d,
3201 };
3202 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3203 tcg_env, s->cfg_ptr->vlen / 8,
3204 s->cfg_ptr->vlen / 8,
3205 data, fns[s->sew]);
3206 mark_vs_dirty(s);
3207 gen_set_label(over);
3208 return true;
3209 }
3210 return false;
3211 }
3212
3213 /*
3214 *** Vector Permutation Instructions
3215 */
3216
3217 static void load_element(TCGv_i64 dest, TCGv_ptr base,
3218 int ofs, int sew, bool sign)
3219 {
3220 switch (sew) {
3221 case MO_8:
3222 if (!sign) {
3223 tcg_gen_ld8u_i64(dest, base, ofs);
3224 } else {
3225 tcg_gen_ld8s_i64(dest, base, ofs);
3226 }
3227 break;
3228 case MO_16:
3229 if (!sign) {
3230 tcg_gen_ld16u_i64(dest, base, ofs);
3231 } else {
3232 tcg_gen_ld16s_i64(dest, base, ofs);
3233 }
3234 break;
3235 case MO_32:
3236 if (!sign) {
3237 tcg_gen_ld32u_i64(dest, base, ofs);
3238 } else {
3239 tcg_gen_ld32s_i64(dest, base, ofs);
3240 }
3241 break;
3242 case MO_64:
3243 tcg_gen_ld_i64(dest, base, ofs);
3244 break;
3245 default:
3246 g_assert_not_reached();
3247 break;
3248 }
3249 }
3250
3251 /* offset of the idx-th element within vector register r */
3252 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
3253 {
3254 #if HOST_BIG_ENDIAN
3255 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
3256 #else
3257 return vreg_ofs(s, r) + (idx << s->sew);
3258 #endif
3259 }
3260
3261 /* adjust the index according to host endianness */
3262 static void endian_adjust(TCGv_i32 ofs, int sew)
3263 {
3264 #if HOST_BIG_ENDIAN
3265 tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
3266 #endif
3267 }
3268
3269 /* Load idx >= VLMAX ? 0 : vreg[idx] */
3270 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
3271 int vreg, TCGv idx, int vlmax)
3272 {
3273 TCGv_i32 ofs = tcg_temp_new_i32();
3274 TCGv_ptr base = tcg_temp_new_ptr();
3275 TCGv_i64 t_idx = tcg_temp_new_i64();
3276 TCGv_i64 t_vlmax, t_zero;
3277
3278 /*
3279 * Mask the index to the length so that we do
3280 * not produce an out-of-range load.
3281 */
3282 tcg_gen_trunc_tl_i32(ofs, idx);
3283 tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
3284
3285 /* Convert the index to an offset. */
3286 endian_adjust(ofs, s->sew);
3287 tcg_gen_shli_i32(ofs, ofs, s->sew);
3288
3289 /* Convert the index to a pointer. */
3290 tcg_gen_ext_i32_ptr(base, ofs);
3291 tcg_gen_add_ptr(base, base, tcg_env);
3292
3293 /* Perform the load. */
3294 load_element(dest, base,
3295 vreg_ofs(s, vreg), s->sew, false);
3296
3297 /* Flush out-of-range indexing to zero. */
3298 t_vlmax = tcg_constant_i64(vlmax);
3299 t_zero = tcg_constant_i64(0);
3300 tcg_gen_extu_tl_i64(t_idx, idx);
3301
3302 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
3303 t_vlmax, dest, t_zero);
3304 }
3305
3306 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
3307 int vreg, int idx, bool sign)
3308 {
3309 load_element(dest, tcg_env, endian_ofs(s, vreg, idx), s->sew, sign);
3310 }
3311
3312 /* Integer Scalar Move Instruction */
3313
3314 static void store_element(TCGv_i64 val, TCGv_ptr base,
3315 int ofs, int sew)
3316 {
3317 switch (sew) {
3318 case MO_8:
3319 tcg_gen_st8_i64(val, base, ofs);
3320 break;
3321 case MO_16:
3322 tcg_gen_st16_i64(val, base, ofs);
3323 break;
3324 case MO_32:
3325 tcg_gen_st32_i64(val, base, ofs);
3326 break;
3327 case MO_64:
3328 tcg_gen_st_i64(val, base, ofs);
3329 break;
3330 default:
3331 g_assert_not_reached();
3332 break;
3333 }
3334 }
3335
3336 /*
3337 * Store vreg[idx] = val.
3338 * The index must be in range of VLMAX.
3339 */
3340 static void vec_element_storei(DisasContext *s, int vreg,
3341 int idx, TCGv_i64 val)
3342 {
3343 store_element(val, tcg_env, endian_ofs(s, vreg, idx), s->sew);
3344 }
3345
3346 /* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
3347 static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
3348 {
3349 if (require_rvv(s) &&
3350 vext_check_isa_ill(s)) {
3351 TCGv_i64 t1;
3352 TCGv dest;
3353
3354 t1 = tcg_temp_new_i64();
3355 dest = tcg_temp_new();
3356 /*
3357 * load vreg and sign-extend to 64 bits,
3358 * then truncate to XLEN bits before storing to gpr.
3359 */
3360 vec_element_loadi(s, t1, a->rs2, 0, true);
3361 tcg_gen_trunc_i64_tl(dest, t1);
3362 gen_set_gpr(s, a->rd, dest);
3363 return true;
3364 }
3365 return false;
3366 }
3367
3368 /* vmv.s.x vd, rs1 # vd[0] = x[rs1] */
3369 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
3370 {
3371 if (require_rvv(s) &&
3372 vext_check_isa_ill(s)) {
3373 /* This instruction ignores LMUL and vector register groups */
3374 TCGv_i64 t1;
3375 TCGv s1;
3376 TCGLabel *over = gen_new_label();
3377
3378 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3379
3380 t1 = tcg_temp_new_i64();
3381
3382 /*
3383 * load gpr and sign-extend to 64 bits,
3384 * then truncate to SEW bits when storing to vreg.
3385 */
3386 s1 = get_gpr(s, a->rs1, EXT_NONE);
3387 tcg_gen_ext_tl_i64(t1, s1);
3388 vec_element_storei(s, a->rd, 0, t1);
3389 mark_vs_dirty(s);
3390 gen_set_label(over);
3391 return true;
3392 }
3393 return false;
3394 }
3395
3396 /* Floating-Point Scalar Move Instructions */
3397 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
3398 {
3399 if (require_rvv(s) &&
3400 require_rvf(s) &&
3401 vext_check_isa_ill(s)) {
3402 gen_set_rm(s, RISCV_FRM_DYN);
3403
3404 unsigned int ofs = (8 << s->sew);
3405 unsigned int len = 64 - ofs;
3406 TCGv_i64 t_nan;
3407
3408 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
3409 /* NaN-box f[rd] as necessary for SEW */
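/* e.g. with SEW=32, bits 63:32 of f[rd] are filled with all ones */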
3410 if (len) {
3411 t_nan = tcg_constant_i64(UINT64_MAX);
3412 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
3413 t_nan, ofs, len);
3414 }
3415
3416 mark_fs_dirty(s);
3417 return true;
3418 }
3419 return false;
3420 }
3421
3422 /* vfmv.s.f vd, rs1 # vd[0] = f[rs1] (vs2=0) */
3423 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3424 {
3425 if (require_rvv(s) &&
3426 require_rvf(s) &&
3427 vext_check_isa_ill(s)) {
3428 gen_set_rm(s, RISCV_FRM_DYN);
3429
3430 /* This instruction ignores LMUL and vector register groups. */
3431 TCGv_i64 t1;
3432 TCGLabel *over = gen_new_label();
3433
3434 /* if vstart >= vl, skip vector register write back */
3435 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3436
3437 /* NaN-box f[rs1] */
3438 t1 = tcg_temp_new_i64();
3439 do_nanbox(s, t1, cpu_fpr[a->rs1]);
3440
3441 vec_element_storei(s, a->rd, 0, t1);
3442 mark_vs_dirty(s);
3443 gen_set_label(over);
3444 return true;
3445 }
3446 return false;
3447 }
3448
3449 /* Vector Slide Instructions */
3450 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3451 {
3452 return require_rvv(s) &&
3453 vext_check_isa_ill(s) &&
3454 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3455 }
3456
3457 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
3458 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
3459 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
3460
3461 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3462 {
3463 return require_rvv(s) &&
3464 vext_check_isa_ill(s) &&
3465 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3466 }
3467
3468 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
3469 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
3470 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
3471
3472 /* Vector Floating-Point Slide Instructions */
3473 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
3474 {
3475 return slideup_check(s, a) &&
3476 require_rvf(s);
3477 }
3478
3479 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
3480 {
3481 return slidedown_check(s, a) &&
3482 require_rvf(s);
3483 }
3484
3485 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
3486 GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
3487
3488 /* Vector Register Gather Instruction */
3489 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3490 {
3491 return require_rvv(s) &&
3492 vext_check_isa_ill(s) &&
3493 require_align(a->rd, s->lmul) &&
3494 require_align(a->rs1, s->lmul) &&
3495 require_align(a->rs2, s->lmul) &&
3496 (a->rd != a->rs2 && a->rd != a->rs1) &&
3497 require_vm(a->vm, a->rd);
3498 }
3499
3500 static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
3501 {
3502 int8_t emul = MO_16 - s->sew + s->lmul;
3503 return require_rvv(s) &&
3504 vext_check_isa_ill(s) &&
3505 (emul >= -3 && emul <= 3) &&
3506 require_align(a->rd, s->lmul) &&
3507 require_align(a->rs1, emul) &&
3508 require_align(a->rs2, s->lmul) &&
3509 (a->rd != a->rs2 && a->rd != a->rs1) &&
3510 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3511 a->rs1, 1 << MAX(emul, 0)) &&
3512 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3513 a->rs2, 1 << MAX(s->lmul, 0)) &&
3514 require_vm(a->vm, a->rd);
3515 }
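
/*
 * Worked EMUL example for the ei16 form (values illustrative): with
 * SEW=64 (s->sew == MO_64 == 3) and LMUL=2 (s->lmul == 1),
 *   emul = MO_16 - 3 + 1 = -1,
 * so the 16-bit index vector vs1 uses EMUL = 1/2, matching the spec's
 * (16 / SEW) * LMUL = (16 / 64) * 2.
 */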
3516
3517 GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
3518 GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)
3519
3520 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3521 {
3522 return require_rvv(s) &&
3523 vext_check_isa_ill(s) &&
3524 require_align(a->rd, s->lmul) &&
3525 require_align(a->rs2, s->lmul) &&
3526 (a->rd != a->rs2) &&
3527 require_vm(a->vm, a->rd);
3528 }
3529
3530 /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
3531 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3532 {
3533 if (!vrgather_vx_check(s, a)) {
3534 return false;
3535 }
3536
3537 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3538 int scale = s->lmul - (s->sew + 3);
3539 int vlmax = s->cfg_ptr->vlen >> -scale;
3540 TCGv_i64 dest = tcg_temp_new_i64();
3541
3542 if (a->rs1 == 0) {
3543 vec_element_loadi(s, dest, a->rs2, 0, false);
3544 } else {
3545 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3546 }
3547
3548 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3549 MAXSZ(s), MAXSZ(s), dest);
3550 mark_vs_dirty(s);
3551 } else {
3552 static gen_helper_opivx * const fns[4] = {
3553 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3554 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3555 };
3556 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3557 }
3558 return true;
3559 }
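
/*
 * Illustrative VLMAX computation (assuming VLEN=128, SEW=32, LMUL=1):
 *   scale = 0 - (2 + 3) = -5, vlmax = 128 >> 5 = 4,
 * which matches VLMAX = LMUL * VLEN / SEW.  The vrgather.vi translation
 * below uses the same expression.
 */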
3560
3561 /* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
3562 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3563 {
3564 if (!vrgather_vx_check(s, a)) {
3565 return false;
3566 }
3567
3568 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3569 int scale = s->lmul - (s->sew + 3);
3570 int vlmax = s->cfg_ptr->vlen >> -scale;
3571 if (a->rs1 >= vlmax) {
3572 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
3573 MAXSZ(s), MAXSZ(s), 0);
3574 } else {
3575 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3576 endian_ofs(s, a->rs2, a->rs1),
3577 MAXSZ(s), MAXSZ(s));
3578 }
3579 mark_vs_dirty(s);
3580 } else {
3581 static gen_helper_opivx * const fns[4] = {
3582 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3583 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3584 };
3585 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3586 s, IMM_ZX);
3587 }
3588 return true;
3589 }
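
/*
 * Because the immediate is zero-extended (IMM_ZX) and known at translate
 * time, the fast path above resolves the out-of-range case directly: with
 * VLMAX=4 as in the example above, "vrgather.vi v8, v4, 7" reduces to
 * splatting 0 into v8.
 */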
3590
3591 /*
3592 * Vector Compress Instruction
3593 *
3594 * The destination vector register group cannot overlap the
3595 * source vector register group or the source mask register.
3596 */
3597 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3598 {
3599 return require_rvv(s) &&
3600 vext_check_isa_ill(s) &&
3601 require_align(a->rd, s->lmul) &&
3602 require_align(a->rs2, s->lmul) &&
3603 (a->rd != a->rs2) &&
3604 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
3605 s->vstart_eq_zero;
3606 }
3607
3608 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3609 {
3610 if (vcompress_vm_check(s, a)) {
3611 uint32_t data = 0;
3612 static gen_helper_gvec_4_ptr * const fns[4] = {
3613 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
3614 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
3615 };
3616 TCGLabel *over = gen_new_label();
3617 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3618
3619 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3620 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3621 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3622 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3623 tcg_env, s->cfg_ptr->vlen / 8,
3624 s->cfg_ptr->vlen / 8, data,
3625 fns[s->sew]);
3626 mark_vs_dirty(s);
3627 gen_set_label(over);
3628 return true;
3629 }
3630 return false;
3631 }
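
/*
 * Behavioural sketch of vcompress.vm (values illustrative):
 *   vs1 mask bits (elements 0..3):  1 0 1 1
 *   vs2:                            a b c d
 *   vd afterwards:                  a c d .   (remaining elements are tail)
 * Elements of vs2 whose mask bit is set are packed contiguously from
 * vd[0], which is why vd may overlap neither vs2 nor the mask vs1.
 */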
3632
3633 /*
3634  * Whole vector register move instructions depend on the vtype register
3635  * (vsew), so the vill bit must be checked. (Section 16.6)
3636 */
3637 #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
3638 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
3639 { \
3640 if (require_rvv(s) && \
3641 vext_check_isa_ill(s) && \
3642 QEMU_IS_ALIGNED(a->rd, LEN) && \
3643 QEMU_IS_ALIGNED(a->rs2, LEN)) { \
3644 uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \
3645 if (s->vstart_eq_zero) { \
3646 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \
3647 vreg_ofs(s, a->rs2), maxsz, maxsz); \
3648 mark_vs_dirty(s); \
3649 } else { \
3650 TCGLabel *over = gen_new_label(); \
3651 tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \
3652 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
3653 tcg_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
3654 mark_vs_dirty(s); \
3655 gen_set_label(over); \
3656 } \
3657 return true; \
3658 } \
3659 return false; \
3660 }
3661
3662 GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
3663 GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
3664 GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
3665 GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
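
/*
 * Size arithmetic for the whole-register moves (assuming VLEN=128):
 *   vmv1r.v copies (128 >> 3) * 1 = 16 bytes,
 *   vmv8r.v copies (128 >> 3) * 8 = 128 bytes,
 * independent of vl; only a nonzero vstart limits what gets written.
 */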
3666
3667 static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
3668 {
3669 uint8_t from = (s->sew + 3) - div;
3670 bool ret = require_rvv(s) &&
3671 (from >= 3 && from <= 8) &&
3672 (a->rd != a->rs2) &&
3673 require_align(a->rd, s->lmul) &&
3674 require_align(a->rs2, s->lmul - div) &&
3675 require_vm(a->vm, a->rd) &&
3676 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
3677 return ret;
3678 }
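
/*
 * Worked example (values illustrative): vzext.vf4 is generated with
 * div == 2, so for SEW=32 (s->sew == MO_32 == 2) the source EEW is
 * 2^from = 2^3 = 8 bits, and vs2 alignment is checked against
 * lmul - 2, i.e. a source EMUL of LMUL / 4.
 */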
3679
3680 static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
3681 {
3682 uint32_t data = 0;
3683 gen_helper_gvec_3_ptr *fn;
3684 TCGLabel *over = gen_new_label();
3685 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3686
3687 static gen_helper_gvec_3_ptr * const fns[6][4] = {
3688 {
3689 NULL, gen_helper_vzext_vf2_h,
3690 gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
3691 },
3692 {
3693 NULL, NULL,
3694 gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
3695 },
3696 {
3697 NULL, NULL,
3698 NULL, gen_helper_vzext_vf8_d
3699 },
3700 {
3701 NULL, gen_helper_vsext_vf2_h,
3702 gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
3703 },
3704 {
3705 NULL, NULL,
3706 gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
3707 },
3708 {
3709 NULL, NULL,
3710 NULL, gen_helper_vsext_vf8_d
3711 }
3712 };
3713
3714 fn = fns[seq][s->sew];
3715 if (fn == NULL) {
3716 return false;
3717 }
3718
3719 data = FIELD_DP32(data, VDATA, VM, a->vm);
3720 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3721 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3722 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3723
3724 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3725 vreg_ofs(s, a->rs2), tcg_env,
3726 s->cfg_ptr->vlen / 8,
3727 s->cfg_ptr->vlen / 8, data, fn);
3728
3729 mark_vs_dirty(s);
3730 gen_set_label(over);
3731 return true;
3732 }
3733
3734 /* Vector Integer Extension */
3735 #define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \
3736 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3737 { \
3738 if (int_ext_check(s, a, DIV)) { \
3739 return int_ext_op(s, a, SEQ); \
3740 } \
3741 return false; \
3742 }
3743
3744 GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
3745 GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
3746 GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
3747 GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
3748 GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
3749 GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)
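
/*
 * Usage sketch (assembly, illustrative, assuming SEW=32):
 *   vzext.vf4 v8, v4, v0.t   # zero-extend 8-bit elements of v4 to 32 bits
 *   vsext.vf2 v8, v4         # sign-extend 16-bit elements of v4 to 32 bits
 * The SEQ argument selects the row of fns[][] in int_ext_op(): rows 0..2
 * are the zero-extend variants, rows 3..5 the sign-extend variants.
 */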