/*
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

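/*
 * Two register ranges [astart, astart + asize) and [bstart, bstart + bsize)
 * overlap iff the span covering both is shorter than the sum of the sizes.
 * E.g. v4..v7 (astart = 4, asize = 4) and v6..v9 (bstart = 6, bsize = 4)
 * give MAX(8, 10) - MIN(4, 6) = 10 - 4 = 6 < 8, so the groups overlap.
 */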
static inline bool is_overlapped(const int8_t astart, int8_t asize,
                                 const int8_t bstart, int8_t bsize)
{
    const int8_t aend = astart + asize;
    const int8_t bend = bstart + bsize;

    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
}

static bool require_rvv(DisasContext *s)
{
    return s->mstatus_vs != EXT_STATUS_DISABLED;
}

static bool require_rvf(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
        return s->cfg_ptr->ext_zvfh;
    case MO_32:
        return s->cfg_ptr->ext_zve32f;
    case MO_64:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

static bool require_scale_rvf(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
        return s->cfg_ptr->ext_zvfh;
    case MO_16:
        return s->cfg_ptr->ext_zve32f;
    case MO_32:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

static bool require_scale_rvfmin(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
        return s->cfg_ptr->ext_zvfhmin;
    case MO_16:
        return s->cfg_ptr->ext_zve32f;
    case MO_32:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
{
    return (vm != 0 || vd != 0);
}

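/*
 * A segment access uses nf register groups of EMUL registers each; the
 * spec requires EMUL * NFIELDS <= 8 and that the group not extend past
 * v31. E.g. nf = 4 with EMUL = 2 (lmul = 1) occupies 8 registers, so vd
 * must be at most 24.
 */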
static bool require_nf(int vd, int nf, int lmul)
{
    int size = nf << MAX(lmul, 0);
    return size <= 8 && vd + size <= 32;
}

/*
 * A vector register must be aligned with the passed-in LMUL (EMUL).
 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
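 * E.g. lmul = 2 (LMUL = 4) requires the register number to be a multiple
 * of 4.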
 */
static bool require_align(const int8_t val, const int8_t lmul)
{
    return lmul <= 0 || extract32(val, 0, lmul) == 0;
}

/*
 * A destination vector register group can overlap a source vector
 * register group only if one of the following holds:
 * 1. The destination EEW equals the source EEW.
 * 2. The destination EEW is smaller than the source EEW and the overlap
 *    is in the lowest-numbered part of the source register group.
 * 3. The destination EEW is greater than the source EEW, the source EMUL
 *    is at least 1, and the overlap is in the highest-numbered part of
 *    the destination register group.
 * (Section 5.2)
 *
 * This function returns true if one of the following holds:
 * * The destination vector register group does not overlap a source
 *   vector register group.
 * * Rule 3 is met.
 * For rule 1, overlap is always allowed, so this function need not be
 * called. For rule 2, the only permitted overlap is (vd == vs), so the
 * caller has to check (vd != vs) before calling this function.
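 *
 * Example of rule 3: a destination group v2,v3 (dst_lmul = 1) and a single
 * source register v3 (src_lmul = 0) overlap only in v3, the
 * highest-numbered part of the destination group, so
 * require_noover(2, 1, 3, 0) returns true.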
 */
static bool require_noover(const int8_t dst, const int8_t dst_lmul,
                           const int8_t src, const int8_t src_lmul)
{
    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

    /* Destination EEW is greater than the source EEW, check rule 3. */
    if (dst_size > src_size) {
        if (dst < src &&
            src_lmul >= 0 &&
            is_overlapped(dst, dst_size, src, src_size) &&
            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
            return true;
        }
    }

    return !is_overlapped(dst, dst_size, src, src_size);
}
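/*
 * vset{i}vl{i} updates vl and vtype, which the translation-time state
 * (SEW, LMUL, vl_eq_vlmax, ...) depends on, so the TB ends after the
 * instruction and execution continues via a fresh TB lookup.
 */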
static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
{
    TCGv s1, dst;

    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
        return false;
    }

    dst = dest_gpr(s, rd);

    if (rd == 0 && rs1 == 0) {
        s1 = tcg_temp_new();
        tcg_gen_mov_tl(s1, cpu_vl);
    } else if (rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_constant_tl(RV_VLEN_MAX);
    } else {
        s1 = get_gpr(s, rs1, EXT_ZERO);
    }

    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    mark_vs_dirty(s);

    gen_update_pc(s, s->cur_insn_len);
    lookup_and_goto_ptr(s);
    s->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
{
    TCGv dst;

    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
        return false;
    }

    dst = dest_gpr(s, rd);

    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    mark_vs_dirty(s);
    gen_update_pc(s, s->cur_insn_len);
    lookup_and_goto_ptr(s);
    s->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
{
    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
{
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
{
    TCGv s1 = tcg_constant_tl(a->rs1);
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetivli(s, a->rd, s1, s2);
}

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8;
}
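/*
 * Each vector register occupies vlen / 8 bytes of env->vreg, so e.g. with
 * vlen = 128 bits, v2 lives at byte offset 32 into the array.
 */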

/* check functions */

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * store check function.
 *
 * Rules to be checked here:
 * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 * 2. The destination vector register number must be a multiple of EMUL.
 *    (Section 3.4.2, 7.3)
 * 3. The EMUL setting must be such that EMUL * NFIELDS <= 8. (Section 7.8)
 * 4. Vector register numbers accessed by the segment load or store
 *    cannot increment past 31. (Section 7.8)
 */
static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
           require_align(vd, emul) &&
           require_nf(vd, nf, emul);
}

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * load check function.
 *
 * Rules to be checked here:
 * 1. All rules that apply to store instructions also apply to
 *    load instructions.
 * 2. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0).
 *    (Section 5.3)
 */
static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
                            uint8_t eew)
{
    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
}

/*
 * Vector indexed, indexed segment store check function.
 *
 * Rules to be checked here:
 * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 * 2. The index vector register number must be a multiple of EMUL.
 *    (Section 3.4.2, 7.3)
 * 3. The destination vector register number must be a multiple of LMUL.
 *    (Section 3.4.2, 7.3)
 * 4. The EMUL setting must be such that EMUL * NFIELDS <= 8. (Section 7.8)
 * 5. Vector register numbers accessed by the segment load or store
 *    cannot increment past 31. (Section 7.8)
 */
static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
                                uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = (emul >= -3 && emul <= 3) &&
               require_align(vs2, emul) &&
               require_align(vd, s->lmul) &&
               require_nf(vd, nf, s->lmul);

    /*
     * The V extension supports all vector load and store instructions,
     * except that it does not support EEW=64 index values when XLEN=32.
     * (Section 18.3)
     */
    if (get_xl(s) == MXL_RV32) {
        ret &= (eew != MO_64);
    }

    return ret;
}

/*
 * Vector indexed, indexed segment load check function.
 *
 * Rules to be checked here:
 * 1. All rules that apply to store instructions also apply to
 *    load instructions.
 * 2. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0).
 *    (Section 5.3)
 * 3. The destination vector register cannot overlap the source vector
 *    register (vs2) group.
 *    (Section 5.2)
 * 4. Destination vector register groups cannot overlap
 *    the source vector register (vs2) group for
 *    indexed segment load instructions. (Section 7.8.3)
 */
static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
                                int nf, int vm, uint8_t eew)
{
    int8_t seg_vd;
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
               require_vm(vm, vd);

    /* Each segment register group has to follow overlap rules. */
    for (int i = 0; i < nf; ++i) {
        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;

        if (eew > s->sew) {
            if (seg_vd != vs2) {
                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
            }
        } else if (eew < s->sew) {
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
        }

        /*
         * Destination vector register groups cannot overlap
         * the source vector register (vs2) group for
         * indexed segment load instructions.
         */
        if (nf > 1) {
            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
                                  vs2, 1 << MAX(emul, 0));
        }
    }
    return ret;
}

static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
    return require_vm(vm, vd) &&
           require_align(vd, s->lmul) &&
           require_align(vs, s->lmul);
}

/*
 * Check function for vector instructions of the format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 * 1. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0).
 *    (Section 5.3)
 * 2. The destination vector register number must be a multiple of LMUL.
 *    (Section 3.4.2)
 * 3. The source (vs2, vs1) vector register numbers must be multiples of
 *    LMUL. (Section 3.4.2)
 */
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ss(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
}

static bool vext_check_ms(DisasContext *s, int vd, int vs)
{
    bool ret = require_align(vs, s->lmul);
    if (vd != vs) {
        ret &= require_noover(vd, 0, vs, s->lmul);
    }
    return ret;
}

/*
 * Check function for maskable vector instructions of the format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 * 1. The source (vs2, vs1) vector register numbers must be multiples of
 *    LMUL. (Section 3.4.2)
 * 2. The destination vector register cannot overlap a source vector
 *    register (vs2, vs1) group.
 *    (Section 5.2)
 * 3. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0),
 *    unless the destination vector register is being written
 *    with a mask value (e.g., comparisons) or the scalar result
 *    of a reduction. (Section 5.3)
 */
static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
{
    bool ret = vext_check_ms(s, vd, vs2) &&
               require_align(vs1, s->lmul);
    if (vd != vs1) {
        ret &= require_noover(vd, 0, vs1, s->lmul);
    }
    return ret;
}

/*
 * Common check function for vector widening instructions
 * of double-width result (2*SEW).
 *
 * Rules to be checked here:
 * 1. The largest vector register group used by an instruction
 *    cannot be greater than 8 vector registers (Section 5.2):
 *    => LMUL < 8.
 *    => SEW < 64.
 * 2. Double-width SEW cannot be greater than ELEN.
 * 3. The destination vector register number must be a multiple of
 *    2 * LMUL. (Section 3.4.2)
 * 4. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0).
 *    (Section 5.3)
 */
static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vd, s->lmul + 1) &&
           require_vm(vm, vd);
}

/*
 * Common check function for vector narrowing instructions
 * of single-width result (SEW) and double-width source (2*SEW).
 *
 * Rules to be checked here:
 * 1. The largest vector register group used by an instruction
 *    cannot be greater than 8 vector registers (Section 5.2):
 *    => LMUL < 8.
 *    => SEW < 64.
 * 2. Double-width SEW cannot be greater than ELEN.
 * 3. The source vector register number must be a multiple of 2 * LMUL.
 *    (Section 3.4.2)
 * 4. The destination vector register number must be a multiple of LMUL.
 *    (Section 3.4.2)
 * 5. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0).
 *    (Section 5.3)
 */
static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
                                     int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vs2, s->lmul + 1) &&
           require_align(vd, s->lmul) &&
           require_vm(vm, vd);
}

static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs, s->lmul);
}

static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul + 1);
}

/*
 * Check function for vector instructions of the format:
 * double-width result and single-width sources (2*SEW = SEW op SEW)
 *
 * Rules to be checked here:
 * 1. All rules defined in the widening common checks apply.
 * 2. The source (vs2, vs1) vector register numbers must be multiples of
 *    LMUL. (Section 3.4.2)
 * 3. The destination vector register cannot overlap a source vector
 *    register (vs2, vs1) group.
 *    (Section 5.2)
 */
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs1, s->lmul);
}

/*
 * Check function for vector instructions of the format:
 * double-width result and double-width source1 and single-width
 * source2 (2*SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 * 1. All rules defined in the widening common checks apply.
 * 2. The source 1 (vs2) vector register number must be a multiple of
 *    2 * LMUL. (Section 3.4.2)
 * 3. The source 2 (vs1) vector register number must be a multiple of
 *    LMUL. (Section 3.4.2)
 * 4. The destination vector register cannot overlap a source vector
 *    register (vs1) group.
 *    (Section 5.2)
 */
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs1, vm) &&
           require_align(vs2, s->lmul + 1);
}

static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
    bool ret = vext_narrow_check_common(s, vd, vs, vm);
    if (vd != vs) {
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
    }
    return ret;
}

/*
 * Check function for vector instructions of the format:
 * single-width result and double-width source 1 and single-width
 * source 2 (SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 * 1. All rules defined in the narrowing common checks apply.
 * 2. The destination vector register cannot overlap a source vector
 *    register (vs2) group.
 *    (Section 5.2)
 * 3. The source 2 (vs1) vector register number must be a multiple of
 *    LMUL. (Section 3.4.2)
 */
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_sd(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
}

/*
 * Check function for vector reduction instructions.
 *
 * Rules to be checked here:
 * 1. The source 1 (vs2) vector register number must be a multiple of
 *    LMUL. (Section 3.4.2)
 */
static bool vext_check_reduction(DisasContext *s, int vs2)
{
    return require_align(vs2, s->lmul) && s->vstart_eq_zero;
}

/*
 * Check function for vector slide instructions.
 *
 * Rules to be checked here:
 * 1. The source 1 (vs2) vector register number must be a multiple of
 *    LMUL. (Section 3.4.2)
 * 2. The destination vector register number must be a multiple of LMUL.
 *    (Section 3.4.2)
 * 3. The destination vector register group for a masked vector
 *    instruction cannot overlap the source mask register (v0).
 *    (Section 5.3)
 * 4. The destination vector register group for vslideup, vslide1up,
 *    vfslide1up, cannot overlap the source vector register (vs2) group.
 *    (Section 5.2, 16.3.1, 16.3.3)
 */
static bool vext_check_slide(DisasContext *s, int vd, int vs2,
                             int vm, bool is_over)
{
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
               require_vm(vm, vd);
    if (is_over) {
        ret &= (vd != vs2);
    }
    return ret;
}

/*
 * cpu_get_tb_cpu_state() sets VILL if RVV is not present, so checking
 * that vill is clear here also checks that RVV is available.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/* common translation macro */
#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
{                                                            \
    if (CHECK(s, a, EEW)) {                                  \
        return OP(s, a, EEW);                                \
    }                                                        \
    return false;                                            \
}

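/*
 * EMUL = (EEW / SEW) * LMUL, i.e. emul = eew - sew + lmul in log2 terms.
 * E.g. an EEW = 32 access with SEW = 64 and LMUL = 2 gives
 * emul = 2 - 3 + 1 = 0, i.e. EMUL = 1. A fractional EMUL is clamped to
 * 0 (one register) for the VDATA descriptor.
 */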
static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return emul < 0 ? 0 : emul;
}

/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s,
                          bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);

    /*
     * As simd_desc supports at most 2048 bytes, while in this
     * implementation the max vector group length is 4096 bytes,
     * the length is split into two parts.
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in the data field of simd_desc.
     */
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}

static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride load */
        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
        /* unmasked unit stride load */
        { gen_helper_vle8_v, gen_helper_vle16_v,
          gen_helper_vle32_v, gen_helper_vle64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    /*
     * Vector load/store instructions have the EEW encoded
     * directly in the instructions. The maximum vector size is
     * calculated with EMUL rather than LMUL.
     */
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride store */
        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
        /* unmasked unit stride store */
        { gen_helper_vse8_v, gen_helper_vse16_v,
          gen_helper_vse32_v, gen_helper_vse64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)

/*
 *** unit stride mask load and store
 */
static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vlm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    /* Mask destination registers are always tail-agnostic */
    data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vsm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)

/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s, bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    stride = get_gpr(s, rs2, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        gen_helper_vlse8_v, gen_helper_vlse16_v,
        gen_helper_vlse32_v, gen_helper_vlse64_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        /* masked stride store */
        gen_helper_vsse8_v, gen_helper_vsse16_v,
        gen_helper_vsse32_v, gen_helper_vsse64_v
    };

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)

/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s, bool is_store)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}

static bool ld_index_check(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)

static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}

static bool st_index_check(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
}

GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[4] = {
        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
        gen_helper_vle32ff_v, gen_helper_vle64ff_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vle8ff_v, MO_8, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)

/*
 * load and store whole register instructions
 */
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);

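/*
 * A whole register access covers evl = VLENB * NFIELDS / (EEW / 8)
 * elements; e.g. vl2re32.v with vlen = 128 gives evl = 16 * 2 / 4 = 8.
 * The access is skipped entirely once vstart >= evl.
 */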
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                             uint32_t width, gen_helper_ldst_whole *fn,
                             DisasContext *s, bool is_store)
{
    uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / width;
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over);

    TCGv_ptr dest;
    TCGv base;
    TCGv_i32 desc;

    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
    dest = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    base = get_gpr(s, rs1, EXT_NONE);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));

    fn(dest, base, cpu_env, desc);

    if (!is_store) {
        mark_vs_dirty(s);
    }
    gen_set_label(over);

    return true;
}

/*
 * Whole register load and store instructions ignore the vtype and vl
 * settings, so we don't need to check the vill bit. (Section 7.9)
 */
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, WIDTH, IS_STORE)      \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)        \
{                                                                \
    if (require_rvv(s) &&                                        \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                        \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF, WIDTH,    \
                                gen_helper_##NAME, s, IS_STORE); \
    }                                                            \
    return false;                                                \
}

GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, 2, false)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, 4, false)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, 8, false)
GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, 1, false)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, 4, false)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, 8, false)
GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, 1, false)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, 2, false)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, 1, false)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, 2, false)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, 4, false)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, 8, false)

/*
 * The vector whole register store instructions are encoded similarly to
 * unmasked unit-stride stores of elements with EEW=8.
 */
GEN_LDST_WHOLE_TRANS(vs1r_v, 1, 1, true)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2, 1, true)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4, 1, true)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true)

/*
 *** Vector Integer Arithmetic Instructions
 */

/*
 * MAXSZ returns the maximum vector size that can be operated on,
 * in bytes, which is used in GVEC IR when the vl_eq_vlmax flag is set
 * to true to accelerate vector operations.
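 *
 * E.g. lmul = 2 (LMUL = 4) gives scale = -1, so with vlen = 1024 bits
 * MAXSZ = 1024 >> 1 = 512 bytes, the four 128-byte registers of the group.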
 */
static inline uint32_t MAXSZ(DisasContext *s)
{
    int scale = s->lmul - 3;
    return s->cfg_ptr->vlen >> -scale;
}

static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

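/*
 * Fast path: when the operation is unmasked, vl equals VLMAX, and the
 * tail-agnostic-with-fractional-LMUL case does not apply, the whole
 * register group can be computed with inline GVEC IR; otherwise fall
 * back to the out-of-line helper, which honours vl, the mask and the
 * tail policy.
 */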
static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
    if (!opivv_check(s, a)) {
        return false;
    }

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, s->cfg_ptr->vlen / 8,
                           s->cfg_ptr->vlen / 8, data, fn);
    }
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    static gen_helper_gvec_4_ptr * const fns[4] = {              \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,            \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,            \
    };                                                           \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
}

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)

typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = get_gpr(s, rs1, EXT_SIGN);

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        TCGv_i64 src1 = tcg_temp_new_i64();

        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        mark_vs_dirty(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    static gen_helper_opivx * const fns[4] = {                   \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,            \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,            \
    };                                                           \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
}

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)

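/*
 * vrsub computes vd[i] = x[rs1] - vs2[i], i.e. a subtraction with the
 * operands reversed, so each expander below emits (b - a) rather than
 * (a - b).
 */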
static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub8_i64(d, b, a);
}

static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub16_i64(d, b, a);
}

static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_sub_i32(ret, arg2, arg1);
}

static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_sub_i64(ret, arg2, arg1);
}

static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_sub_vec(vece, r, b, a);
}

static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s rsub_op[4] = {
        { .fni8 = gen_vec_rsub8_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_vec_rsub16_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_rsub_i32,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_rsub_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
}

GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)

typedef enum {
    IMM_ZX,         /* Zero-extended */
    IMM_SX,         /* Sign-extended */
    IMM_TRUNC_SEW,  /* Truncate to log2(SEW) bits */
    IMM_TRUNC_2SEW, /* Truncate to log2(2*SEW) bits */
} imm_mode_t;

static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
{
    switch (imm_mode) {
    case IMM_ZX:
        return extract64(imm, 0, 5);
    case IMM_SX:
        return sextract64(imm, 0, 5);
    case IMM_TRUNC_SEW:
        return extract64(imm, 0, s->sew + 3);
    case IMM_TRUNC_2SEW:
        return extract64(imm, 0, s->sew + 4);
    default:
        g_assert_not_reached();
    }
}
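/*
 * E.g. a shift immediate with SEW = 32 uses IMM_TRUNC_SEW: sew = MO_32 = 2,
 * so the immediate is truncated to 2 + 3 = 5 = log2(32) bits.
 */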

static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s,
                        imm_mode_t imm_mode)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);

static inline bool
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
              gen_helper_opivx *fn, imm_mode_t imm_mode)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
        mark_vs_dirty(s);
        return true;
    }
    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
}

/* OPIVI with GVEC IR */
#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF)    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)      \
{                                                           \
    static gen_helper_opivx * const fns[4] = {              \
        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,     \
        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,     \
    };                                                      \
    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,          \
                         fns[s->sew], IMM_MODE);            \
}

GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)

static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
                               int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_constant_i64(c);
    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
}

GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)

/* Vector Widening Integer Add/Subtract */

/* OPIVV with WIDEN */
static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn,
                           bool (*checkfn)(DisasContext *, arg_rmrr *))
{
    if (checkfn(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, s->cfg_ptr->vlen / 8,
                           s->cfg_ptr->vlen / 8,
                           data, fn);
        mark_vs_dirty(s);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK)             \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_gvec_4_ptr * const fns[3] = {    \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opivv_widen(s, a, fns[s->sew], CHECK);   \
}

GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)

/* OPIVX with WIDEN */
static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opivx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIVX_WIDEN_TRANS(NAME)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_opivx * const fns[3] = {         \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opivx_widen(s, a, fns[s->sew]);          \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, s->cfg_ptr->vlen / 8,
                           s->cfg_ptr->vlen / 8, data, fn);
        mark_vs_dirty(s);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_gvec_4_ptr * const fns[3] = {    \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opiwv_widen(s, a, fns[s->sew]);          \
}

GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_opivx * const fns[3] = {         \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opiwx_widen(s, a, fns[s->sew]);          \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[4] = {            \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env,           \
                           s->cfg_ptr->vlen / 8,                   \
                           s->cfg_ptr->vlen / 8, data,             \
                           fns[s->sew]);                           \
        mark_vs_dirty(s);                                          \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 11.4)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
                                                                         \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK)       \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)      \
{                                                           \
    if (CHECK(s, a)) {                                      \
        static gen_helper_opivx * const fns[4] = {          \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
        };                                                  \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,    \
                           fns[s->sew], s, IMM_MODE);       \
    }                                                       \
    return false;                                           \
}

GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)

/* Vector Bitwise Logical Instructions */
GEN_OPIVV_GVEC_TRANS(vand_vv, and)
GEN_OPIVV_GVEC_TRANS(vor_vv, or)
GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
1773
1774 /* Vector Single-Width Bit Shift Instructions */
1775 GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
1776 GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
1777 GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)
1778
1779 typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
1780 uint32_t, uint32_t);
1781
1782 static inline bool
1783 do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1784 gen_helper_opivx *fn)
1785 {
1786 if (!opivx_check(s, a)) {
1787 return false;
1788 }
1789
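    /*
     * Take the GVEC fast path only when the operation is unmasked and
     * vl == vlmax.  With a fractional LMUL, part of the register is
     * tail even at vl == vlmax, so a tail-agnostic setting still needs
     * the helper's tail handling rather than a plain gvec expansion.
     */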
1790 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1791 TCGv_i32 src1 = tcg_temp_new_i32();
1792
1793 tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1794 tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1795 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1796 src1, MAXSZ(s), MAXSZ(s));
1797
1798 mark_vs_dirty(s);
1799 return true;
1800 }
1801 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1802 }
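/*
 * Note the extract above: the scalar shift amount is masked to
 * log2(SEW) bits (s->sew + 3 bits, i.e. 3 bits for SEW=8 up to 6 bits
 * for SEW=64), matching the architectural rule that only the low
 * lg2(SEW) bits of the shift operand are used.
 */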
1803
1804 #define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
1805 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1806 { \
1807 static gen_helper_opivx * const fns[4] = { \
1808 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1809 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1810 }; \
1811 \
1812 return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1813 }
1814
1815 GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
1816 GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
1817 GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
1818
1819 GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
1820 GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
1821 GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
1822
1823 /* Vector Narrowing Integer Right Shift Instructions */
1824 static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
1825 {
1826 return require_rvv(s) &&
1827 vext_check_isa_ill(s) &&
1828 vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
1829 }
1830
1831 /* OPIVV with NARROW */
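/*
 * The brcond pair at the top of the expansion below skips the whole
 * operation when vl == 0 or when vstart >= vl, since no element would
 * be updated in either case.
 */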
1832 #define GEN_OPIWV_NARROW_TRANS(NAME) \
1833 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1834 { \
1835 if (opiwv_narrow_check(s, a)) { \
1836 uint32_t data = 0; \
1837 static gen_helper_gvec_4_ptr * const fns[3] = { \
1838 gen_helper_##NAME##_b, \
1839 gen_helper_##NAME##_h, \
1840 gen_helper_##NAME##_w, \
1841 }; \
1842 TCGLabel *over = gen_new_label(); \
1843 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
1844 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
1845 \
1846 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1847 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
1848 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
1849 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
1850 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1851 vreg_ofs(s, a->rs1), \
1852 vreg_ofs(s, a->rs2), cpu_env, \
1853 s->cfg_ptr->vlen / 8, \
1854 s->cfg_ptr->vlen / 8, data, \
1855 fns[s->sew]); \
1856 mark_vs_dirty(s); \
1857 gen_set_label(over); \
1858 return true; \
1859 } \
1860 return false; \
1861 }
1862 GEN_OPIWV_NARROW_TRANS(vnsra_wv)
1863 GEN_OPIWV_NARROW_TRANS(vnsrl_wv)
1864
1865 static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
1866 {
1867 return require_rvv(s) &&
1868 vext_check_isa_ill(s) &&
1869 vext_check_sd(s, a->rd, a->rs2, a->vm);
1870 }
1871
1872 /* OPIVX with NARROW */
1873 #define GEN_OPIWX_NARROW_TRANS(NAME) \
1874 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1875 { \
1876 if (opiwx_narrow_check(s, a)) { \
1877 static gen_helper_opivx * const fns[3] = { \
1878 gen_helper_##NAME##_b, \
1879 gen_helper_##NAME##_h, \
1880 gen_helper_##NAME##_w, \
1881 }; \
1882 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1883 } \
1884 return false; \
1885 }
1886
1887 GEN_OPIWX_NARROW_TRANS(vnsra_wx)
1888 GEN_OPIWX_NARROW_TRANS(vnsrl_wx)
1889
1890 /* OPIWI with NARROW */
1891 #define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
1892 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1893 { \
1894 if (opiwx_narrow_check(s, a)) { \
1895 static gen_helper_opivx * const fns[3] = { \
1896 gen_helper_##OPIVX##_b, \
1897 gen_helper_##OPIVX##_h, \
1898 gen_helper_##OPIVX##_w, \
1899 }; \
1900 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1901 fns[s->sew], s, IMM_MODE); \
1902 } \
1903 return false; \
1904 }
1905
1906 GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
1907 GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)
1908
1909 /* Vector Integer Comparison Instructions */
1910 /*
1911 * For all comparison instructions, an illegal instruction exception is raised
1912 * if the destination vector register overlaps a source vector register group
1913 * and LMUL > 1.
1914 */
1915 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1916 {
1917 return require_rvv(s) &&
1918 vext_check_isa_ill(s) &&
1919 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1920 }
1921
1922 GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1923 GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1924 GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1925 GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1926 GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1927 GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1928
1929 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1930 {
1931 return require_rvv(s) &&
1932 vext_check_isa_ill(s) &&
1933 vext_check_ms(s, a->rd, a->rs2);
1934 }
1935
1936 GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1937 GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1938 GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1939 GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1940 GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1941 GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1942 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1943 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1944
1945 GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
1946 GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
1947 GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
1948 GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
1949 GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
1950 GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
1951
1952 /* Vector Integer Min/Max Instructions */
1953 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1954 GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
1955 GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1956 GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
1957 GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1958 GEN_OPIVX_TRANS(vmin_vx, opivx_check)
1959 GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1960 GEN_OPIVX_TRANS(vmax_vx, opivx_check)
1961
1962 /* Vector Single-Width Integer Multiply Instructions */
1963
1964 static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
1965 {
1966 /*
1967 * All Zve* extensions support all vector integer instructions,
1968 * except that the vmulh integer multiply variants
1969 * that return the high word of the product
1970 * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
1971 * are not included for EEW=64 in Zve64*. (Section 18.2)
1972 */
1973 return opivv_check(s, a) &&
1974 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
1975 }
1976
1977 static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
1978 {
1979 /*
1980 * All Zve* extensions support all vector integer instructions,
1981 * except that the vmulh integer multiply variants
1982 * that return the high word of the product
1983 * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
1984 * are not included for EEW=64 in Zve64*. (Section 18.2)
1985 */
1986 return opivx_check(s, a) &&
1987 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
1988 }
1989
1990 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
1991 GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
1992 GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
1993 GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
1994 GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
1995 GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
1996 GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
1997 GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)
1998
1999 /* Vector Integer Divide Instructions */
2000 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
2001 GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
2002 GEN_OPIVV_TRANS(vremu_vv, opivv_check)
2003 GEN_OPIVV_TRANS(vrem_vv, opivv_check)
2004 GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
2005 GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
2006 GEN_OPIVX_TRANS(vremu_vx, opivx_check)
2007 GEN_OPIVX_TRANS(vrem_vx, opivx_check)
2008
2009 /* Vector Widening Integer Multiply Instructions */
2010 GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
2011 GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
2012 GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
2013 GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
2014 GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
2015 GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
2016
2017 /* Vector Single-Width Integer Multiply-Add Instructions */
2018 GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
2019 GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
2020 GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
2021 GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
2022 GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
2023 GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
2024 GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
2025 GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
2026
2027 /* Vector Widening Integer Multiply-Add Instructions */
2028 GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
2029 GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
2030 GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
2031 GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
2032 GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
2033 GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
2034 GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
2035
2036 /* Vector Integer Merge and Move Instructions */
2037 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
2038 {
2039 if (require_rvv(s) &&
2040 vext_check_isa_ill(s) &&
2041 /* vmv.v.v has rs2 = 0 and vm = 1 */
2042 vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
2043 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2044 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
2045 vreg_ofs(s, a->rs1),
2046 MAXSZ(s), MAXSZ(s));
2047 } else {
2048 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2049 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2050 static gen_helper_gvec_2_ptr * const fns[4] = {
2051 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
2052 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
2053 };
2054 TCGLabel *over = gen_new_label();
2055 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2056 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2057
2058 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
2059 cpu_env, s->cfg_ptr->vlen / 8,
2060 s->cfg_ptr->vlen / 8, data,
2061 fns[s->sew]);
2062 gen_set_label(over);
2063 }
2064 mark_vs_dirty(s);
2065 return true;
2066 }
2067 return false;
2068 }
2069
2070 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
2071 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
2072 {
2073 if (require_rvv(s) &&
2074 vext_check_isa_ill(s) &&
2075 /* vmv.v.x has rs2 = 0 and vm = 1 */
2076 vext_check_ss(s, a->rd, 0, 1)) {
2077 TCGv s1;
2078 TCGLabel *over = gen_new_label();
2079 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2080 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2081
2082 s1 = get_gpr(s, a->rs1, EXT_SIGN);
2083
2084 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
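            /*
             * vmv.v.x sign-extends rs1 to SEW; on RV32 with SEW=64 the
             * target-long scalar must first be extended to 64 bits
             * before the dup.
             */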
2085 if (get_xl(s) == MXL_RV32 && s->sew == MO_64) {
2086 TCGv_i64 s1_i64 = tcg_temp_new_i64();
2087 tcg_gen_ext_tl_i64(s1_i64, s1);
2088 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2089 MAXSZ(s), MAXSZ(s), s1_i64);
2090 } else {
2091 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
2092 MAXSZ(s), MAXSZ(s), s1);
2093 }
2094 } else {
2095 TCGv_i32 desc;
2096 TCGv_i64 s1_i64 = tcg_temp_new_i64();
2097 TCGv_ptr dest = tcg_temp_new_ptr();
2098 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2099 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2100 static gen_helper_vmv_vx * const fns[4] = {
2101 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
2102 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
2103 };
2104
2105 tcg_gen_ext_tl_i64(s1_i64, s1);
2106 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2107 s->cfg_ptr->vlen / 8, data));
2108 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2109 fns[s->sew](dest, s1_i64, cpu_env, desc);
2110 }
2111
2112 mark_vs_dirty(s);
2113 gen_set_label(over);
2114 return true;
2115 }
2116 return false;
2117 }
2118
2119 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
2120 {
2121 if (require_rvv(s) &&
2122 vext_check_isa_ill(s) &&
2123 /* vmv.v.i has rs2 = 0 and vm = 1 */
2124 vext_check_ss(s, a->rd, 0, 1)) {
2125 int64_t simm = sextract64(a->rs1, 0, 5);
2126 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2127 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
2128 MAXSZ(s), MAXSZ(s), simm);
2129 mark_vs_dirty(s);
2130 } else {
2131 TCGv_i32 desc;
2132 TCGv_i64 s1;
2133 TCGv_ptr dest;
2134 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2135 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2136 static gen_helper_vmv_vx * const fns[4] = {
2137 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
2138 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
2139 };
2140 TCGLabel *over = gen_new_label();
2141 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2142 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2143
2144 s1 = tcg_constant_i64(simm);
2145 dest = tcg_temp_new_ptr();
2146 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2147 s->cfg_ptr->vlen / 8, data));
2148 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2149 fns[s->sew](dest, s1, cpu_env, desc);
2150
2151 mark_vs_dirty(s);
2152 gen_set_label(over);
2153 }
2154 return true;
2155 }
2156 return false;
2157 }
2158
2159 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
2160 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
2161 GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
2162
2163 /*
2164 *** Vector Fixed-Point Arithmetic Instructions
2165 */
2166
2167 /* Vector Single-Width Saturating Add and Subtract */
2168 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
2169 GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
2170 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
2171 GEN_OPIVV_TRANS(vssub_vv, opivv_check)
2172 GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
2173 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
2174 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
2175 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
2176 GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
2177 GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
2178
2179 /* Vector Single-Width Averaging Add and Subtract */
2180 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
2181 GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
2182 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
2183 GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
2184 GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
2185 GEN_OPIVX_TRANS(vaaddu_vx, opivx_check)
2186 GEN_OPIVX_TRANS(vasub_vx, opivx_check)
2187 GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
2188
2189 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2190
2191 static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
2192 {
2193 /*
2194 * All Zve* extensions support all vector fixed-point arithmetic
2195 * instructions, except that vsmul.vv and vsmul.vx are not supported
2196 * for EEW=64 in Zve64*. (Section 18.2)
2197 */
2198 return opivv_check(s, a) &&
2199 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
2200 }
2201
2202 static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
2203 {
2204 /*
2205 * All Zve* extensions support all vector fixed-point arithmetic
2206 * instructions, except that vsmul.vv and vsmul.vx are not supported
2207 * for EEW=64 in Zve64*. (Section 18.2)
2208 */
2209 return opivx_check(s, a) &&
2210 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
2211 }
2212
2213 GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
2214 GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check)
2215
2216 /* Vector Single-Width Scaling Shift Instructions */
2217 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
2218 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
2219 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
2220 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
2221 GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
2222 GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)
2223
2224 /* Vector Narrowing Fixed-Point Clip Instructions */
2225 GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
2226 GEN_OPIWV_NARROW_TRANS(vnclip_wv)
2227 GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
2228 GEN_OPIWX_NARROW_TRANS(vnclip_wx)
2229 GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
2230 GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)
2231
2232 /*
2233 *** Vector Floating-Point Arithmetic Instructions
2234 */
2235
2236 /*
2237 * As RVF-only CPUs always have values NaN-boxed to 64 bits,
2238 * RVF and RVD can be treated equally.
2239 * We don't have to deal with the case of SEW > FLEN.
2240 *
2241 * If SEW < FLEN, check whether the input FP register holds a valid
2242 * NaN-boxed value; if so, the least-significant SEW bits of the
2243 * f register are used, else the canonical NaN value is used.
2244 */
2245 static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
2246 {
2247 switch (s->sew) {
2248 case MO_16:
2249 gen_check_nanbox_h(out, in);
2250 break;
2251 case MO_32:
2252 gen_check_nanbox_s(out, in);
2253 break;
2254 case MO_64:
2255 tcg_gen_mov_i64(out, in);
2256 break;
2257 default:
2258 g_assert_not_reached();
2259 }
2260 }
2261
2262 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2263
2264 /*
2265 * If the current SEW does not correspond to a supported IEEE floating-point
2266 * type, an illegal instruction exception is raised.
2267 */
2268 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2269 {
2270 return require_rvv(s) &&
2271 require_rvf(s) &&
2272 vext_check_isa_ill(s) &&
2273 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2274 }
2275
2276 /* OPFVV without GVEC IR */
2277 #define GEN_OPFVV_TRANS(NAME, CHECK) \
2278 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2279 { \
2280 if (CHECK(s, a)) { \
2281 uint32_t data = 0; \
2282 static gen_helper_gvec_4_ptr * const fns[3] = { \
2283 gen_helper_##NAME##_h, \
2284 gen_helper_##NAME##_w, \
2285 gen_helper_##NAME##_d, \
2286 }; \
2287 TCGLabel *over = gen_new_label(); \
2288 gen_set_rm(s, RISCV_FRM_DYN); \
2289 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2290 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2291 \
2292 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2293 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2294 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2295 data = \
2296 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
2297 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2298 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2299 vreg_ofs(s, a->rs1), \
2300 vreg_ofs(s, a->rs2), cpu_env, \
2301 s->cfg_ptr->vlen / 8, \
2302 s->cfg_ptr->vlen / 8, data, \
2303 fns[s->sew - 1]); \
2304 mark_vs_dirty(s); \
2305 gen_set_label(over); \
2306 return true; \
2307 } \
2308 return false; \
2309 }
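/*
 * For floating-point expanders the helper tables cover only f16, f32
 * and f64, so they are indexed with s->sew - 1 (MO_16 maps to entry 0).
 */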
2310 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
2311 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
2312
2313 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
2314 TCGv_env, TCGv_i32);
2315
2316 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
2317 uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
2318 {
2319 TCGv_ptr dest, src2, mask;
2320 TCGv_i32 desc;
2321 TCGv_i64 t1;
2322
2323 TCGLabel *over = gen_new_label();
2324 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2325 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2326
2327 dest = tcg_temp_new_ptr();
2328 mask = tcg_temp_new_ptr();
2329 src2 = tcg_temp_new_ptr();
2330 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2331 s->cfg_ptr->vlen / 8, data));
2332
2333 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
2334 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
2335 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2336
2337 /* NaN-box f[rs1] */
2338 t1 = tcg_temp_new_i64();
2339 do_nanbox(s, t1, cpu_fpr[rs1]);
2340
2341 fn(dest, mask, t1, src2, cpu_env, desc);
2342
2343 mark_vs_dirty(s);
2344 gen_set_label(over);
2345 return true;
2346 }
2347
2348 /*
2349 * If the current SEW does not correspond to a supported IEEE floating-point
2350 * type, an illegal instruction exception is raised.
2351 */
2352 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2353 {
2354 return require_rvv(s) &&
2355 require_rvf(s) &&
2356 vext_check_isa_ill(s) &&
2357 vext_check_ss(s, a->rd, a->rs2, a->vm);
2358 }
2359
2360 /* OPFVF without GVEC IR */
2361 #define GEN_OPFVF_TRANS(NAME, CHECK) \
2362 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2363 { \
2364 if (CHECK(s, a)) { \
2365 uint32_t data = 0; \
2366 static gen_helper_opfvf *const fns[3] = { \
2367 gen_helper_##NAME##_h, \
2368 gen_helper_##NAME##_w, \
2369 gen_helper_##NAME##_d, \
2370 }; \
2371 gen_set_rm(s, RISCV_FRM_DYN); \
2372 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2373 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2374 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2375 data = FIELD_DP32(data, VDATA, VTA_ALL_1S, \
2376 s->cfg_vta_all_1s); \
2377 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2378 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2379 fns[s->sew - 1], s); \
2380 } \
2381 return false; \
2382 }
2383
2384 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
2385 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
2386 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
2387
2388 /* Vector Widening Floating-Point Add/Subtract Instructions */
2389 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2390 {
2391 return require_rvv(s) &&
2392 require_scale_rvf(s) &&
2393 (s->sew != MO_8) &&
2394 vext_check_isa_ill(s) &&
2395 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2396 }
2397
2398 /* OPFVV with WIDEN */
2399 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
2400 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2401 { \
2402 if (CHECK(s, a)) { \
2403 uint32_t data = 0; \
2404 static gen_helper_gvec_4_ptr * const fns[2] = { \
2405 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2406 }; \
2407 TCGLabel *over = gen_new_label(); \
2408 gen_set_rm(s, RISCV_FRM_DYN); \
2409 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2410 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\
2411 \
2412 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2413 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2414 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2415 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2416 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2417 vreg_ofs(s, a->rs1), \
2418 vreg_ofs(s, a->rs2), cpu_env, \
2419 s->cfg_ptr->vlen / 8, \
2420 s->cfg_ptr->vlen / 8, data, \
2421 fns[s->sew - 1]); \
2422 mark_vs_dirty(s); \
2423 gen_set_label(over); \
2424 return true; \
2425 } \
2426 return false; \
2427 }
2428
2429 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
2430 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
2431
2432 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2433 {
2434 return require_rvv(s) &&
2435 require_scale_rvf(s) &&
2436 (s->sew != MO_8) &&
2437 vext_check_isa_ill(s) &&
2438 vext_check_ds(s, a->rd, a->rs2, a->vm);
2439 }
2440
2441 /* OPFVF with WIDEN */
2442 #define GEN_OPFVF_WIDEN_TRANS(NAME) \
2443 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2444 { \
2445 if (opfvf_widen_check(s, a)) { \
2446 uint32_t data = 0; \
2447 static gen_helper_opfvf *const fns[2] = { \
2448 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2449 }; \
2450 gen_set_rm(s, RISCV_FRM_DYN); \
2451 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2452 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2453 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2454 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2455 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2456 fns[s->sew - 1], s); \
2457 } \
2458 return false; \
2459 }
2460
2461 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
2462 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
2463
2464 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2465 {
2466 return require_rvv(s) &&
2467 require_scale_rvf(s) &&
2468 (s->sew != MO_8) &&
2469 vext_check_isa_ill(s) &&
2470 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2471 }
2472
2473 /* OPFVV with WIDEN, operating on an already-widened vs2 (.wv forms) */
2474 #define GEN_OPFWV_WIDEN_TRANS(NAME) \
2475 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2476 { \
2477 if (opfwv_widen_check(s, a)) { \
2478 uint32_t data = 0; \
2479 static gen_helper_gvec_4_ptr * const fns[2] = { \
2480 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2481 }; \
2482 TCGLabel *over = gen_new_label(); \
2483 gen_set_rm(s, RISCV_FRM_DYN); \
2484 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2485 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2486 \
2487 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2488 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2489 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2490 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2491 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2492 vreg_ofs(s, a->rs1), \
2493 vreg_ofs(s, a->rs2), cpu_env, \
2494 s->cfg_ptr->vlen / 8, \
2495 s->cfg_ptr->vlen / 8, data, \
2496 fns[s->sew - 1]); \
2497 mark_vs_dirty(s); \
2498 gen_set_label(over); \
2499 return true; \
2500 } \
2501 return false; \
2502 }
2503
2504 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2505 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2506
2507 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2508 {
2509 return require_rvv(s) &&
2510 require_scale_rvf(s) &&
2511 (s->sew != MO_8) &&
2512 vext_check_isa_ill(s) &&
2513 vext_check_dd(s, a->rd, a->rs2, a->vm);
2514 }
2515
2516 /* OPFVF with WIDEN, operating on an already-widened vs2 (.wf forms) */
2517 #define GEN_OPFWF_WIDEN_TRANS(NAME) \
2518 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2519 { \
2520 if (opfwf_widen_check(s, a)) { \
2521 uint32_t data = 0; \
2522 static gen_helper_opfvf *const fns[2] = { \
2523 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2524 }; \
2525 gen_set_rm(s, RISCV_FRM_DYN); \
2526 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2527 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2528 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2529 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2530 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2531 fns[s->sew - 1], s); \
2532 } \
2533 return false; \
2534 }
2535
2536 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2537 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2538
2539 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2540 GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2541 GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2542 GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
2543 GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
2544 GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
2545
2546 /* Vector Widening Floating-Point Multiply */
2547 GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2548 GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2549
2550 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2551 GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2552 GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2553 GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2554 GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2555 GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2556 GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2557 GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2558 GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2559 GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2560 GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2561 GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2562 GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2563 GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2564 GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2565 GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2566 GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2567
2568 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2569 GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2570 GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2571 GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2572 GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2573 GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2574 GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2575 GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2576 GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2577
2578 /* Vector Floating-Point Square-Root Instruction */
2579
2580 /*
2581 * If the current SEW does not correspond to a supported IEEE floating-point
2582 * type, an illegal instruction exception is raised.
2583 */
2584 static bool opfv_check(DisasContext *s, arg_rmr *a)
2585 {
2586 return require_rvv(s) &&
2587 require_rvf(s) &&
2588 vext_check_isa_ill(s) &&
2589 /* OPFV instructions ignore vs1 check */
2590 vext_check_ss(s, a->rd, a->rs2, a->vm);
2591 }
2592
2593 static bool do_opfv(DisasContext *s, arg_rmr *a,
2594 gen_helper_gvec_3_ptr *fn,
2595 bool (*checkfn)(DisasContext *, arg_rmr *),
2596 int rm)
2597 {
2598 if (checkfn(s, a)) {
2599 uint32_t data = 0;
2600 TCGLabel *over = gen_new_label();
2601 gen_set_rm_chkfrm(s, rm);
2602 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2603 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2604
2605 data = FIELD_DP32(data, VDATA, VM, a->vm);
2606 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2607 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2608 data = FIELD_DP32(data, VDATA, VMA, s->vma);
2609 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2610 vreg_ofs(s, a->rs2), cpu_env,
2611 s->cfg_ptr->vlen / 8,
2612 s->cfg_ptr->vlen / 8, data, fn);
2613 mark_vs_dirty(s);
2614 gen_set_label(over);
2615 return true;
2616 }
2617 return false;
2618 }
2619
2620 #define GEN_OPFV_TRANS(NAME, CHECK, FRM) \
2621 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2622 { \
2623 static gen_helper_gvec_3_ptr * const fns[3] = { \
2624 gen_helper_##NAME##_h, \
2625 gen_helper_##NAME##_w, \
2626 gen_helper_##NAME##_d \
2627 }; \
2628 return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
2629 }
2630
2631 GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
2632 GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
2633 GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN)
2634
2635 /* Vector Floating-Point MIN/MAX Instructions */
2636 GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2637 GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2638 GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2639 GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2640
2641 /* Vector Floating-Point Sign-Injection Instructions */
2642 GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2643 GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2644 GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2645 GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2646 GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2647 GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2648
2649 /* Vector Floating-Point Compare Instructions */
2650 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2651 {
2652 return require_rvv(s) &&
2653 require_rvf(s) &&
2654 vext_check_isa_ill(s) &&
2655 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2656 }
2657
2658 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2659 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2660 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2661 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2662
2663 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2664 {
2665 return require_rvv(s) &&
2666 require_rvf(s) &&
2667 vext_check_isa_ill(s) &&
2668 vext_check_ms(s, a->rd, a->rs2);
2669 }
2670
2671 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2672 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2673 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2674 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2675 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2676 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2677
2678 /* Vector Floating-Point Classify Instruction */
2679 GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)
2680
2681 /* Vector Floating-Point Merge Instruction */
2682 GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
2683
2684 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2685 {
2686 if (require_rvv(s) &&
2687 require_rvf(s) &&
2688 vext_check_isa_ill(s) &&
2689 require_align(a->rd, s->lmul)) {
2690 gen_set_rm(s, RISCV_FRM_DYN);
2691
2692 TCGv_i64 t1;
2693
2694 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2695 t1 = tcg_temp_new_i64();
2696 /* NaN-box f[rs1] */
2697 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2698
2699 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2700 MAXSZ(s), MAXSZ(s), t1);
2701 mark_vs_dirty(s);
2702 } else {
2703 TCGv_ptr dest;
2704 TCGv_i32 desc;
2705 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2706 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2707 data = FIELD_DP32(data, VDATA, VMA, s->vma);
2708 static gen_helper_vmv_vx * const fns[3] = {
2709 gen_helper_vmv_v_x_h,
2710 gen_helper_vmv_v_x_w,
2711 gen_helper_vmv_v_x_d,
2712 };
2713 TCGLabel *over = gen_new_label();
2714 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2715 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
2716
2717 t1 = tcg_temp_new_i64();
2718 /* NaN-box f[rs1] */
2719 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2720
2721 dest = tcg_temp_new_ptr();
2722 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
2723 s->cfg_ptr->vlen / 8, data));
2724 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2725
2726 fns[s->sew - 1](dest, t1, cpu_env, desc);
2727
2728 mark_vs_dirty(s);
2729 gen_set_label(over);
2730 }
2731 return true;
2732 }
2733 return false;
2734 }
2735
2736 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2737 #define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM) \
2738 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2739 { \
2740 static gen_helper_gvec_3_ptr * const fns[3] = { \
2741 gen_helper_##HELPER##_h, \
2742 gen_helper_##HELPER##_w, \
2743 gen_helper_##HELPER##_d \
2744 }; \
2745 return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
2746 }
2747
2748 GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
2749 GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
2750 GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
2751 GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
2752 /* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
2753 GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
2754 GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
2755
2756 /* Widening Floating-Point/Integer Type-Convert Instructions */
2757
2758 /*
2759 * If the current SEW does not correspond to a supported IEEE floating-point
2760 * type, an illegal instruction exception is raised.
2761 */
2762 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2763 {
2764 return require_rvv(s) &&
2765 vext_check_isa_ill(s) &&
2766 vext_check_ds(s, a->rd, a->rs2, a->vm);
2767 }
2768
2769 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
2770 {
2771 return opfv_widen_check(s, a) &&
2772 require_rvf(s);
2773 }
2774
2775 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
2776 {
2777 return opfv_widen_check(s, a) &&
2778 require_scale_rvfmin(s) &&
2779 (s->sew != MO_8);
2780 }
2781
2782 #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
2783 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2784 { \
2785 if (CHECK(s, a)) { \
2786 uint32_t data = 0; \
2787 static gen_helper_gvec_3_ptr * const fns[2] = { \
2788 gen_helper_##HELPER##_h, \
2789 gen_helper_##HELPER##_w, \
2790 }; \
2791 TCGLabel *over = gen_new_label(); \
2792 gen_set_rm_chkfrm(s, FRM); \
2793 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2794 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2795 \
2796 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2797 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2798 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2799 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2800 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2801 vreg_ofs(s, a->rs2), cpu_env, \
2802 s->cfg_ptr->vlen / 8, \
2803 s->cfg_ptr->vlen / 8, data, \
2804 fns[s->sew - 1]); \
2805 mark_vs_dirty(s); \
2806 gen_set_label(over); \
2807 return true; \
2808 } \
2809 return false; \
2810 }
2811
2812 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
2813 RISCV_FRM_DYN)
2814 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
2815 RISCV_FRM_DYN)
2816 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
2817 RISCV_FRM_DYN)
2818 /* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
2819 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
2820 RISCV_FRM_RTZ)
2821 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
2822 RISCV_FRM_RTZ)
2823
2824 static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
2825 {
2826 return require_rvv(s) &&
2827 require_scale_rvf(s) &&
2828 vext_check_isa_ill(s) &&
2829 /* OPFV widening instructions ignore vs1 check */
2830 vext_check_ds(s, a->rd, a->rs2, a->vm);
2831 }
2832
2833 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
2834 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2835 { \
2836 if (opfxv_widen_check(s, a)) { \
2837 uint32_t data = 0; \
2838 static gen_helper_gvec_3_ptr * const fns[3] = { \
2839 gen_helper_##NAME##_b, \
2840 gen_helper_##NAME##_h, \
2841 gen_helper_##NAME##_w, \
2842 }; \
2843 TCGLabel *over = gen_new_label(); \
2844 gen_set_rm(s, RISCV_FRM_DYN); \
2845 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2846 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2847 \
2848 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2849 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2850 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2851 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2852 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2853 vreg_ofs(s, a->rs2), cpu_env, \
2854 s->cfg_ptr->vlen / 8, \
2855 s->cfg_ptr->vlen / 8, data, \
2856 fns[s->sew]); \
2857 mark_vs_dirty(s); \
2858 gen_set_label(over); \
2859 return true; \
2860 } \
2861 return false; \
2862 }
2863
2864 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
2865 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)
2866
2867 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2868
2869 /*
2870 * If the current SEW does not correspond to a supported IEEE floating-point
2871 * type, an illegal instruction exception is raised.
2872 */
2873 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2874 {
2875 return require_rvv(s) &&
2876 vext_check_isa_ill(s) &&
2877 /* OPFV narrowing instructions ignore vs1 check */
2878 vext_check_sd(s, a->rd, a->rs2, a->vm);
2879 }
2880
2881 static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
2882 {
2883 return opfv_narrow_check(s, a) &&
2884 require_rvf(s) &&
2885 (s->sew != MO_64);
2886 }
2887
2888 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
2889 {
2890 return opfv_narrow_check(s, a) &&
2891 require_scale_rvfmin(s) &&
2892 (s->sew != MO_8);
2893 }
2894
2895 static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
2896 {
2897 return opfv_narrow_check(s, a) &&
2898 require_scale_rvf(s) &&
2899 (s->sew != MO_8);
2900 }
2901
2902 #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
2903 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2904 { \
2905 if (CHECK(s, a)) { \
2906 uint32_t data = 0; \
2907 static gen_helper_gvec_3_ptr * const fns[2] = { \
2908 gen_helper_##HELPER##_h, \
2909 gen_helper_##HELPER##_w, \
2910 }; \
2911 TCGLabel *over = gen_new_label(); \
2912 gen_set_rm_chkfrm(s, FRM); \
2913 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2914 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2915 \
2916 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2917 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2918 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2919 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2920 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2921 vreg_ofs(s, a->rs2), cpu_env, \
2922 s->cfg_ptr->vlen / 8, \
2923 s->cfg_ptr->vlen / 8, data, \
2924 fns[s->sew - 1]); \
2925 mark_vs_dirty(s); \
2926 gen_set_label(over); \
2927 return true; \
2928 } \
2929 return false; \
2930 }
2931
2932 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
2933 RISCV_FRM_DYN)
2934 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
2935 RISCV_FRM_DYN)
2936 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
2937 RISCV_FRM_DYN)
2938 /* Reuse the helper function from vfncvt.f.f.w */
2939 GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
2940 RISCV_FRM_ROD)
2941
2942 static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
2943 {
2944 return require_rvv(s) &&
2945 require_scale_rvf(s) &&
2946 vext_check_isa_ill(s) &&
2947 /* OPFV narrowing instructions ignore vs1 check */
2948 vext_check_sd(s, a->rd, a->rs2, a->vm);
2949 }
2950
2951 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
2952 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2953 { \
2954 if (opxfv_narrow_check(s, a)) { \
2955 uint32_t data = 0; \
2956 static gen_helper_gvec_3_ptr * const fns[3] = { \
2957 gen_helper_##HELPER##_b, \
2958 gen_helper_##HELPER##_h, \
2959 gen_helper_##HELPER##_w, \
2960 }; \
2961 TCGLabel *over = gen_new_label(); \
2962 gen_set_rm_chkfrm(s, FRM); \
2963 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2964 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
2965 \
2966 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2967 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2968 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2969 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2970 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2971 vreg_ofs(s, a->rs2), cpu_env, \
2972 s->cfg_ptr->vlen / 8, \
2973 s->cfg_ptr->vlen / 8, data, \
2974 fns[s->sew]); \
2975 mark_vs_dirty(s); \
2976 gen_set_label(over); \
2977 return true; \
2978 } \
2979 return false; \
2980 }
2981
2982 GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
2983 GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
2984 /* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
2985 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
2986 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)
2987
2988 /*
2989 *** Vector Reduction Operations
2990 */
2991 /* Vector Single-Width Integer Reduction Instructions */
2992 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2993 {
2994 return require_rvv(s) &&
2995 vext_check_isa_ill(s) &&
2996 vext_check_reduction(s, a->rs2);
2997 }
2998
2999 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
3000 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
3001 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
3002 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
3003 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
3004 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
3005 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
3006 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
3007
3008 /* Vector Widening Integer Reduction Instructions */
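/*
 * The widened SEW (s->sew + 1) must also fit within ELEN:
 * elen >> 4 gives 2 (MO_32) for ELEN=32 and 4 for ELEN=64, which,
 * together with s->sew < MO_64, bounds the destination element width
 * to at most ELEN bits.
 */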
3009 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
3010 {
3011 return reduction_check(s, a) && (s->sew < MO_64) &&
3012 ((s->sew + 1) <= (s->cfg_ptr->elen >> 4));
3013 }
3014
3015 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
3016 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
3017
3018 /* Vector Single-Width Floating-Point Reduction Instructions */
3019 static bool freduction_check(DisasContext *s, arg_rmrr *a)
3020 {
3021 return reduction_check(s, a) &&
3022 require_rvf(s);
3023 }
3024
3025 GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
3026 GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
3027 GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
3028 GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
3029
3030 /* Vector Widening Floating-Point Reduction Instructions */
3031 static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
3032 {
3033 return reduction_widen_check(s, a) &&
3034 require_scale_rvf(s) &&
3035 (s->sew != MO_8);
3036 }
3037
3038 GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
3039 GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
3040
3041 /*
3042 *** Vector Mask Operations
3043 */
3044
3045 /* Vector Mask-Register Logical Instructions */
3046 #define GEN_MM_TRANS(NAME) \
3047 static bool trans_##NAME(DisasContext *s, arg_r *a) \
3048 { \
3049 if (require_rvv(s) && \
3050 vext_check_isa_ill(s)) { \
3051 uint32_t data = 0; \
3052 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
3053 TCGLabel *over = gen_new_label(); \
3054 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
3055 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
3056 \
3057 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
3058 data = \
3059 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
3060 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
3061 vreg_ofs(s, a->rs1), \
3062 vreg_ofs(s, a->rs2), cpu_env, \
3063 s->cfg_ptr->vlen / 8, \
3064 s->cfg_ptr->vlen / 8, data, fn); \
3065 mark_vs_dirty(s); \
3066 gen_set_label(over); \
3067 return true; \
3068 } \
3069 return false; \
3070 }
3071
3072 GEN_MM_TRANS(vmand_mm)
3073 GEN_MM_TRANS(vmnand_mm)
3074 GEN_MM_TRANS(vmandn_mm)
3075 GEN_MM_TRANS(vmxor_mm)
3076 GEN_MM_TRANS(vmor_mm)
3077 GEN_MM_TRANS(vmnor_mm)
3078 GEN_MM_TRANS(vmorn_mm)
3079 GEN_MM_TRANS(vmxnor_mm)
3080
3081 /* Vector count population in mask vcpop */
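/*
 * vcpop.m writes only x[rd]; no vector register is modified, so there
 * is no mark_vs_dirty() in this path.  vstart must be zero.
 */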
3082 static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
3083 {
3084 if (require_rvv(s) &&
3085 vext_check_isa_ill(s) &&
3086 s->vstart_eq_zero) {
3087 TCGv_ptr src2, mask;
3088 TCGv dst;
3089 TCGv_i32 desc;
3090 uint32_t data = 0;
3091 data = FIELD_DP32(data, VDATA, VM, a->vm);
3092 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3093
3094 mask = tcg_temp_new_ptr();
3095 src2 = tcg_temp_new_ptr();
3096 dst = dest_gpr(s, a->rd);
3097 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
3098 s->cfg_ptr->vlen / 8, data));
3099
3100 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
3101 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
3102
3103 gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc);
3104 gen_set_gpr(s, a->rd, dst);
3105 return true;
3106 }
3107 return false;
3108 }
3109
3110 /* vfirst find-first-set mask bit */
3111 static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
3112 {
3113 if (require_rvv(s) &&
3114 vext_check_isa_ill(s) &&
3115 s->vstart_eq_zero) {
3116 TCGv_ptr src2, mask;
3117 TCGv dst;
3118 TCGv_i32 desc;
3119 uint32_t data = 0;
3120 data = FIELD_DP32(data, VDATA, VM, a->vm);
3121 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3122
3123 mask = tcg_temp_new_ptr();
3124 src2 = tcg_temp_new_ptr();
3125 dst = dest_gpr(s, a->rd);
3126 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
3127 s->cfg_ptr->vlen / 8, data));
3128
3129 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
3130 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
3131
3132 gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc);
3133 gen_set_gpr(s, a->rd, dst);
3134 return true;
3135 }
3136 return false;
3137 }
3138
3139 /*
3140 * vmsbf.m set-before-first mask bit
3141 * vmsif.m set-including-first mask bit
3142 * vmsof.m set-only-first mask bit
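 *
 * All three require vstart == 0, and vd must overlap neither vs2 nor,
 * when masked, v0; the checks below enforce this.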
3143 */
3144 #define GEN_M_TRANS(NAME) \
3145 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3146 { \
3147 if (require_rvv(s) && \
3148 vext_check_isa_ill(s) && \
3149 require_vm(a->vm, a->rd) && \
3150 (a->rd != a->rs2) && \
3151 s->vstart_eq_zero) { \
3152 uint32_t data = 0; \
3153 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
3154 TCGLabel *over = gen_new_label(); \
3155 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
3156 \
3157 data = FIELD_DP32(data, VDATA, VM, a->vm); \
3158 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
3159 data = \
3160 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
3161 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
3162 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
3163 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
3164 cpu_env, s->cfg_ptr->vlen / 8, \
3165 s->cfg_ptr->vlen / 8, \
3166 data, fn); \
3167 mark_vs_dirty(s); \
3168 gen_set_label(over); \
3169 return true; \
3170 } \
3171 return false; \
3172 }
3173
3174 GEN_M_TRANS(vmsbf_m)
3175 GEN_M_TRANS(vmsif_m)
3176 GEN_M_TRANS(vmsof_m)
3177
3178 /*
3179 * Vector Iota Instruction
3180 *
3181 * 1. The destination register cannot overlap the source register.
3182 * 2. If masked, cannot overlap the mask register ('v0').
3183 * 3. An illegal instruction exception is raised if vstart is non-zero.
3184 */
3185 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
3186 {
3187 if (require_rvv(s) &&
3188 vext_check_isa_ill(s) &&
3189 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
3190 require_vm(a->vm, a->rd) &&
3191 require_align(a->rd, s->lmul) &&
3192 s->vstart_eq_zero) {
3193 uint32_t data = 0;
3194 TCGLabel *over = gen_new_label();
3195 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3196
3197 data = FIELD_DP32(data, VDATA, VM, a->vm);
3198 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3199 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3200 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3201 static gen_helper_gvec_3_ptr * const fns[4] = {
3202 gen_helper_viota_m_b, gen_helper_viota_m_h,
3203 gen_helper_viota_m_w, gen_helper_viota_m_d,
3204 };
3205 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3206 vreg_ofs(s, a->rs2), cpu_env,
3207 s->cfg_ptr->vlen / 8,
3208 s->cfg_ptr->vlen / 8, data, fns[s->sew]);
3209 mark_vs_dirty(s);
3210 gen_set_label(over);
3211 return true;
3212 }
3213 return false;
3214 }
3215
3216 /* Vector Element Index Instruction */
3217 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
3218 {
3219 if (require_rvv(s) &&
3220 vext_check_isa_ill(s) &&
3221 require_align(a->rd, s->lmul) &&
3222 require_vm(a->vm, a->rd)) {
3223 uint32_t data = 0;
3224 TCGLabel *over = gen_new_label();
3225 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3226 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3227
3228 data = FIELD_DP32(data, VDATA, VM, a->vm);
3229 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3230 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3231 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3232 static gen_helper_gvec_2_ptr * const fns[4] = {
3233 gen_helper_vid_v_b, gen_helper_vid_v_h,
3234 gen_helper_vid_v_w, gen_helper_vid_v_d,
3235 };
3236 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3237 cpu_env, s->cfg_ptr->vlen / 8,
3238 s->cfg_ptr->vlen / 8,
3239 data, fns[s->sew]);
3240 mark_vs_dirty(s);
3241 gen_set_label(over);
3242 return true;
3243 }
3244 return false;
3245 }
3246
3247 /*
3248 *** Vector Permutation Instructions
3249 */
3250
3251 static void load_element(TCGv_i64 dest, TCGv_ptr base,
3252 int ofs, int sew, bool sign)
3253 {
3254 switch (sew) {
3255 case MO_8:
3256 if (!sign) {
3257 tcg_gen_ld8u_i64(dest, base, ofs);
3258 } else {
3259 tcg_gen_ld8s_i64(dest, base, ofs);
3260 }
3261 break;
3262 case MO_16:
3263 if (!sign) {
3264 tcg_gen_ld16u_i64(dest, base, ofs);
3265 } else {
3266 tcg_gen_ld16s_i64(dest, base, ofs);
3267 }
3268 break;
3269 case MO_32:
3270 if (!sign) {
3271 tcg_gen_ld32u_i64(dest, base, ofs);
3272 } else {
3273 tcg_gen_ld32s_i64(dest, base, ofs);
3274 }
3275 break;
3276 case MO_64:
3277 tcg_gen_ld_i64(dest, base, ofs);
3278 break;
3279 default:
3280 g_assert_not_reached();
3281 break;
3282 }
3283 }
3284
3285 /* offset of the idx element with base register r */
3286 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
3287 {
3288 #if HOST_BIG_ENDIAN
3289 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
3290 #else
3291 return vreg_ofs(s, r) + (idx << s->sew);
3292 #endif
3293 }
3294
3295 /* adjust the index according to the host endianness */
3296 static void endian_adjust(TCGv_i32 ofs, int sew)
3297 {
3298 #if HOST_BIG_ENDIAN
3299 tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
3300 #endif
3301 }
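/*
 * On big-endian hosts each vector register is stored as host-order
 * 64-bit units, so the element index within an 8-byte unit is
 * mirrored: XOR with (7 >> sew), the number of elements per unit
 * minus one (7 for bytes, 3 for halfwords, 1 for words, 0 for
 * doublewords).
 */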
3302
3303 /* Load idx >= VLMAX ? 0 : vreg[idx] */
3304 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
3305 int vreg, TCGv idx, int vlmax)
3306 {
3307 TCGv_i32 ofs = tcg_temp_new_i32();
3308 TCGv_ptr base = tcg_temp_new_ptr();
3309 TCGv_i64 t_idx = tcg_temp_new_i64();
3310 TCGv_i64 t_vlmax, t_zero;
3311
3312 /*
3313 * Mask the index to the length so that we do
3314 * not produce an out-of-range load.
3315 */
3316 tcg_gen_trunc_tl_i32(ofs, idx);
3317 tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
3318
3319 /* Convert the index to an offset. */
3320 endian_adjust(ofs, s->sew);
3321 tcg_gen_shli_i32(ofs, ofs, s->sew);
3322
3323 /* Convert the index to a pointer. */
3324 tcg_gen_ext_i32_ptr(base, ofs);
3325 tcg_gen_add_ptr(base, base, cpu_env);
3326
3327 /* Perform the load. */
3328 load_element(dest, base,
3329 vreg_ofs(s, vreg), s->sew, false);
3330
3331 /* Flush out-of-range indexing to zero. */
3332 t_vlmax = tcg_constant_i64(vlmax);
3333 t_zero = tcg_constant_i64(0);
3334 tcg_gen_extu_tl_i64(t_idx, idx);
3335
3336 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
3337 t_vlmax, dest, t_zero);
3338 }
3339
3340 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
3341 int vreg, int idx, bool sign)
3342 {
3343 load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign);
3344 }
3345
3346 /* Integer Scalar Move Instruction */
3347
3348 static void store_element(TCGv_i64 val, TCGv_ptr base,
3349 int ofs, int sew)
3350 {
3351 switch (sew) {
3352 case MO_8:
3353 tcg_gen_st8_i64(val, base, ofs);
3354 break;
3355 case MO_16:
3356 tcg_gen_st16_i64(val, base, ofs);
3357 break;
3358 case MO_32:
3359 tcg_gen_st32_i64(val, base, ofs);
3360 break;
3361 case MO_64:
3362 tcg_gen_st_i64(val, base, ofs);
3363 break;
3364 default:
3365 g_assert_not_reached();
3366 break;
3367 }
3368 }
3369
3370 /*
3371 * Store vreg[idx] = val.
3372 * The index must be less than VLMAX.
3373 */
3374 static void vec_element_storei(DisasContext *s, int vreg,
3375 int idx, TCGv_i64 val)
3376 {
3377 store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
3378 }
3379
3380 /* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
3381 static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
3382 {
3383 if (require_rvv(s) &&
3384 vext_check_isa_ill(s)) {
3385 TCGv_i64 t1;
3386 TCGv dest;
3387
3388 t1 = tcg_temp_new_i64();
3389 dest = tcg_temp_new();
3390 /*
3391 * load vreg and sign-extend to 64 bits,
3392 * then truncate to XLEN bits before storing to gpr.
3393 */
3394 vec_element_loadi(s, t1, a->rs2, 0, true);
3395 tcg_gen_trunc_i64_tl(dest, t1);
3396 gen_set_gpr(s, a->rd, dest);
3397 return true;
3398 }
3399 return false;
3400 }
3401
3402 /* vmv.s.x vd, rs1 # vd[0] = rs1 */
3403 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
3404 {
3405 if (require_rvv(s) &&
3406 vext_check_isa_ill(s)) {
3407 /* This instruction ignores LMUL and vector register groups */
3408 TCGv_i64 t1;
3409 TCGv s1;
3410 TCGLabel *over = gen_new_label();
3411
3412 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3413 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3414
3415 t1 = tcg_temp_new_i64();
3416
3417 /*
3418 * load gpr and sign-extend to 64 bits,
3419 * then truncate to SEW bits when storing to vreg.
3420 */
3421 s1 = get_gpr(s, a->rs1, EXT_NONE);
3422 tcg_gen_ext_tl_i64(t1, s1);
3423 vec_element_storei(s, a->rd, 0, t1);
3424 mark_vs_dirty(s);
3425 gen_set_label(over);
3426 return true;
3427 }
3428 return false;
3429 }
3430
3431 /* Floating-Point Scalar Move Instructions */
3432 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
3433 {
3434 if (require_rvv(s) &&
3435 require_rvf(s) &&
3436 vext_check_isa_ill(s)) {
3437 gen_set_rm(s, RISCV_FRM_DYN);
3438
3439 unsigned int ofs = (8 << s->sew);
3440 unsigned int len = 64 - ofs;
3441 TCGv_i64 t_nan;
3442
3443 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
3444 /* NaN-box f[rd] as necessary for SEW */
3445 if (len) {
3446 t_nan = tcg_constant_i64(UINT64_MAX);
3447 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
3448 t_nan, ofs, len);
3449 }
3450
3451 mark_fs_dirty(s);
3452 return true;
3453 }
3454 return false;
3455 }
3456
3457 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
3458 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3459 {
3460 if (require_rvv(s) &&
3461 require_rvf(s) &&
3462 vext_check_isa_ill(s)) {
3463 gen_set_rm(s, RISCV_FRM_DYN);
3464
3465 /* This instruction ignores LMUL and vector register groups. */
3466 TCGv_i64 t1;
3467 TCGLabel *over = gen_new_label();
3468
3469 /* if vl == 0 or vstart >= vl, skip vector register write back */
3470 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3471 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3472
3473 /* NaN-box f[rs1] */
3474 t1 = tcg_temp_new_i64();
3475 do_nanbox(s, t1, cpu_fpr[a->rs1]);
3476
3477 vec_element_storei(s, a->rd, 0, t1);
3478 mark_vs_dirty(s);
3479 gen_set_label(over);
3480 return true;
3481 }
3482 return false;
3483 }
3484
3485 /* Vector Slide Instructions */
3486 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3487 {
3488 return require_rvv(s) &&
3489 vext_check_isa_ill(s) &&
3490 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3491 }
3492
3493 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
3494 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
3495 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
3496
3497 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3498 {
3499 return require_rvv(s) &&
3500 vext_check_isa_ill(s) &&
3501 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3502 }
3503
3504 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
3505 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
3506 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
3507
3508 /* Vector Floating-Point Slide Instructions */
3509 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
3510 {
3511 return slideup_check(s, a) &&
3512 require_rvf(s);
3513 }
3514
3515 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
3516 {
3517 return slidedown_check(s, a) &&
3518 require_rvf(s);
3519 }
3520
3521 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
3522 GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
3523
3524 /* Vector Register Gather Instructions */
3525 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3526 {
3527 return require_rvv(s) &&
3528 vext_check_isa_ill(s) &&
3529 require_align(a->rd, s->lmul) &&
3530 require_align(a->rs1, s->lmul) &&
3531 require_align(a->rs2, s->lmul) &&
3532 (a->rd != a->rs2 && a->rd != a->rs1) &&
3533 require_vm(a->vm, a->rd);
3534 }
3535
3536 static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
3537 {
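/* vrgatherei16 uses 16-bit indices, so vs1 has EMUL = (16 / SEW) * LMUL (log2 form below) */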
3538 int8_t emul = MO_16 - s->sew + s->lmul;
3539 return require_rvv(s) &&
3540 vext_check_isa_ill(s) &&
3541 (emul >= -3 && emul <= 3) &&
3542 require_align(a->rd, s->lmul) &&
3543 require_align(a->rs1, emul) &&
3544 require_align(a->rs2, s->lmul) &&
3545 (a->rd != a->rs2 && a->rd != a->rs1) &&
3546 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3547 a->rs1, 1 << MAX(emul, 0)) &&
3548 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3549 a->rs2, 1 << MAX(s->lmul, 0)) &&
3550 require_vm(a->vm, a->rd);
3551 }
3552
3553 GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
3554 GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)
3555
3556 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3557 {
3558 return require_rvv(s) &&
3559 vext_check_isa_ill(s) &&
3560 require_align(a->rd, s->lmul) &&
3561 require_align(a->rs2, s->lmul) &&
3562 (a->rd != a->rs2) &&
3563 require_vm(a->vm, a->rd);
3564 }
3565
3566 /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
3567 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3568 {
3569 if (!vrgather_vx_check(s, a)) {
3570 return false;
3571 }
3572
3573 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
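/* VLMAX = (VLEN / SEW) * LMUL, computed below as a log2 shift of vlen */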
3574 int scale = s->lmul - (s->sew + 3);
3575 int vlmax = s->cfg_ptr->vlen >> -scale;
3576 TCGv_i64 dest = tcg_temp_new_i64();
3577
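/* rs1 == x0 reads as 0, so the gather index is 0 and element 0 of vs2 is splatted */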
3578 if (a->rs1 == 0) {
3579 vec_element_loadi(s, dest, a->rs2, 0, false);
3580 } else {
3581 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3582 }
3583
3584 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3585 MAXSZ(s), MAXSZ(s), dest);
3586 mark_vs_dirty(s);
3587 } else {
3588 static gen_helper_opivx * const fns[4] = {
3589 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3590 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3591 };
3592 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3593 }
3594 return true;
3595 }
3596
3597 /* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
3598 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3599 {
3600 if (!vrgather_vx_check(s, a)) {
3601 return false;
3602 }
3603
3604 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3605 int scale = s->lmul - (s->sew + 3);
3606 int vlmax = s->cfg_ptr->vlen >> -scale;
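/* an immediate index >= VLMAX writes all-zero elements */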
3607 if (a->rs1 >= vlmax) {
3608 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
3609 MAXSZ(s), MAXSZ(s), 0);
3610 } else {
3611 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3612 endian_ofs(s, a->rs2, a->rs1),
3613 MAXSZ(s), MAXSZ(s));
3614 }
3615 mark_vs_dirty(s);
3616 } else {
3617 static gen_helper_opivx * const fns[4] = {
3618 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3619 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3620 };
3621 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3622 s, IMM_ZX);
3623 }
3624 return true;
3625 }
3626
3627 /*
3628 * Vector Compress Instruction
3629 *
3630 * The destination vector register group cannot overlap the
3631 * source vector register group or the source mask register.
3632 */
3633 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3634 {
3635 return require_rvv(s) &&
3636 vext_check_isa_ill(s) &&
3637 require_align(a->rd, s->lmul) &&
3638 require_align(a->rs2, s->lmul) &&
3639 (a->rd != a->rs2) &&
3640 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
3641 s->vstart_eq_zero;
3642 }
3643
3644 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3645 {
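/* for vcompress.vm, a->rs1 is the mask register vs1 selecting which elements of vs2 are packed into vd */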
3646 if (vcompress_vm_check(s, a)) {
3647 uint32_t data = 0;
3648 static gen_helper_gvec_4_ptr * const fns[4] = {
3649 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
3650 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
3651 };
3652 TCGLabel *over = gen_new_label();
3653 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3654
3655 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3656 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3657 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3658 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3659 cpu_env, s->cfg_ptr->vlen / 8,
3660 s->cfg_ptr->vlen / 8, data,
3661 fns[s->sew]);
3662 mark_vs_dirty(s);
3663 gen_set_label(over);
3664 return true;
3665 }
3666 return false;
3667 }
3668
3669 /*
3670 * Whole Vector Register Move Instructions ignore vtype and vl setting.
3671 * Thus, we don't need to check vill bit. (Section 16.6)
3672 */
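/* e.g. vmv2r.v v2, v4 copies the pair v4-v5 into v2-v3; rd and rs2 must be multiples of LEN */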
3673 #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
3674 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
3675 { \
3676 if (require_rvv(s) && \
3677 QEMU_IS_ALIGNED(a->rd, LEN) && \
3678 QEMU_IS_ALIGNED(a->rs2, LEN)) { \
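/* maxsz = LEN whole registers of VLEN / 8 bytes each */ \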
3679 uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \
3680 if (s->vstart_eq_zero) { \
3681 /* EEW = 8 */ \
3682 tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
3683 vreg_ofs(s, a->rs2), maxsz, maxsz); \
3684 mark_vs_dirty(s); \
3685 } else { \
3686 TCGLabel *over = gen_new_label(); \
3687 tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \
3688 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
3689 cpu_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
3690 mark_vs_dirty(s); \
3691 gen_set_label(over); \
3692 } \
3693 return true; \
3694 } \
3695 return false; \
3696 }
3697
3698 GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
3699 GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
3700 GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
3701 GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
3702
3703 static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
3704 {
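/* "from" is log2 of the source EEW in bits; the source EEW must be at least 8 */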
3705 uint8_t from = (s->sew + 3) - div;
3706 bool ret = require_rvv(s) &&
3707 (from >= 3 && from <= 8) &&
3708 (a->rd != a->rs2) &&
3709 require_align(a->rd, s->lmul) &&
3710 require_align(a->rs2, s->lmul - div) &&
3711 require_vm(a->vm, a->rd) &&
3712 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
3713 return ret;
3714 }
3715
3716 static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
3717 {
3718 uint32_t data = 0;
3719 gen_helper_gvec_3_ptr *fn;
3720 TCGLabel *over = gen_new_label();
3721 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3722 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3723
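/* indexed by SEQ: rows 0-2 are vzext vf2/vf4/vf8, rows 3-5 are vsext vf2/vf4/vf8 */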
3724 static gen_helper_gvec_3_ptr * const fns[6][4] = {
3725 {
3726 NULL, gen_helper_vzext_vf2_h,
3727 gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
3728 },
3729 {
3730 NULL, NULL,
3731 gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
3732 },
3733 {
3734 NULL, NULL,
3735 NULL, gen_helper_vzext_vf8_d
3736 },
3737 {
3738 NULL, gen_helper_vsext_vf2_h,
3739 gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
3740 },
3741 {
3742 NULL, NULL,
3743 gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
3744 },
3745 {
3746 NULL, NULL,
3747 NULL, gen_helper_vsext_vf8_d
3748 }
3749 };
3750
3751 fn = fns[seq][s->sew];
3752 if (fn == NULL) {
3753 return false;
3754 }
3755
3756 data = FIELD_DP32(data, VDATA, VM, a->vm);
3757 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3758 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3759 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3760
3761 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3762 vreg_ofs(s, a->rs2), cpu_env,
3763 s->cfg_ptr->vlen / 8,
3764 s->cfg_ptr->vlen / 8, data, fn);
3765
3766 mark_vs_dirty(s);
3767 gen_set_label(over);
3768 return true;
3769 }
3770
3771 /* Vector Integer Extension */
3772 #define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \
3773 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3774 { \
3775 if (int_ext_check(s, a, DIV)) { \
3776 return int_ext_op(s, a, SEQ); \
3777 } \
3778 return false; \
3779 }
3780
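/* DIV is log2 of the extension factor: vf2 -> 1, vf4 -> 2, vf8 -> 3 */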
3781 GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
3782 GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
3783 GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
3784 GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
3785 GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
3786 GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)