/*
 * RISC-V Vector Extension Helpers for QEMU.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/memop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
                            target_ulong s2)
{
    int vlmax, vl;
    RISCVCPU *cpu = env_archcpu(env);
    uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
    uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
    bool vill = FIELD_EX64(s2, VTYPE, VILL);
    target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);

    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
        /* only set vill bit. */
        env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
        env->vl = 0;
        env->vstart = 0;
        return 0;
    }

    vlmax = vext_get_vlmax(cpu, s2);
    if (s1 <= vlmax) {
        vl = s1;
    } else {
        vl = vlmax;
    }
    env->vl = vl;
    env->vtype = s2;
    env->vstart = 0;
    return vl;
}
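/*
 * Illustrative note: a request larger than VLMAX is simply clamped.
 * E.g. with VLEN = 128, SEW = 8 and LMUL = 1, VLMAX = 16, so an AVL of
 * 100 in s1 yields vl = 16 rather than an error.
 */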
/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif
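/*
 * For example, with H1 on a big-endian host, logical byte index 0 maps
 * to host offset 7 of its 64-bit chunk (0 ^ 7 == 7) and index 7 maps to
 * offset 0, so bytes are addressed as if the chunk were little-endian;
 * on little-endian hosts the macros are the identity.
 */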
static inline uint32_t vext_nf(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, NF);
}

static inline uint32_t vext_mlen(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, MLEN);
}

static inline uint32_t vext_vm(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VM);
}

static inline uint32_t vext_lmul(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, LMUL);
}

static uint32_t vext_wd(uint32_t desc)
{
    return (simd_data(desc) >> 11) & 0x1;
}

/*
 * Get vector group length in bytes. Its range is [64, 2048].
 *
 * As simd_desc supports at most 256 bytes, the max vlen is 512 bits.
 * So vlen in bytes is encoded as maxsz.
 */
static inline uint32_t vext_maxsz(uint32_t desc)
{
    return simd_maxsz(desc) << vext_lmul(desc);
}
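/*
 * For example, assuming maxsz encodes 64 bytes of vlen and the LMUL
 * field holds 3 (LMUL = 8), vext_maxsz returns 64 << 3 = 512 bytes for
 * the whole register group.
 */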
/*
 * This function checks watchpoint before real load operation.
 *
 * In softmmu mode, the TLB API probe_access is enough for watchpoint check.
 * In user mode, there is no watchpoint support now.
 *
 * It will trigger an exception if there is no mapping in TLB
 * and page table walk can't fill the TLB entry. Then the guest
 * software can return here after processing the exception or never return.
 */
static void probe_pages(CPURISCVState *env, target_ulong addr,
                        target_ulong len, uintptr_t ra,
                        MMUAccessType access_type)
{
    target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
    target_ulong curlen = MIN(pagelen, len);

    probe_access(env, addr, curlen, access_type,
                 cpu_mmu_index(env, false), ra);
    if (len > curlen) {
        /* the access crosses a page boundary: probe the second page too */
        addr += curlen;
        curlen = len - curlen;
        probe_access(env, addr, curlen, access_type,
                     cpu_mmu_index(env, false), ra);
    }
}
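/*
 * For example, with 4 KiB pages, probing 16 bytes at address 0xffc first
 * probes the 4 bytes up to the page boundary, then the remaining 12
 * bytes on the following page, so a fault on either page is raised
 * before any element is written back.
 */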
#ifdef HOST_WORDS_BIGENDIAN
static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
{
    /*
     * Split the remaining range into two parts.
     * The first part is in the last uint64_t unit.
     * The second part starts from the next uint64_t unit.
     */
    int part1 = 0, part2 = tot - cnt;
    if (cnt % 8) {
        part1 = 8 - (cnt % 8);
        part2 = tot - cnt - part1;
        memset((void *)((uintptr_t)tail & ~(7ULL)), 0, part1);
        memset((void *)(((uintptr_t)tail + 8) & ~(7ULL)), 0, part2);
    } else {
        memset(tail, 0, part2);
    }
}
#else
static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
{
    memset(tail, 0, tot - cnt);
}
#endif
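/*
 * For example, clearing with cnt = 5 and tot = 16 on a big-endian host
 * gives part1 = 3 bytes inside the partially-live uint64_t unit and
 * part2 = 8 bytes covering the next unit, matching the host layout
 * implied by the H1 fixup above.
 */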
static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int8_t *cur = ((int8_t *)vd + H1(idx));
    vext_clear(cur, cnt, tot);
}

static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int16_t *cur = ((int16_t *)vd + H2(idx));
    vext_clear(cur, cnt, tot);
}

static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int32_t *cur = ((int32_t *)vd + H4(idx));
    vext_clear(cur, cnt, tot);
}

static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int64_t *cur = (int64_t *)vd + idx;
    vext_clear(cur, cnt, tot);
}
static inline void vext_set_elem_mask(void *v0, int mlen, int index,
                                      uint8_t value)
{
    int idx = (index * mlen) / 64;
    int pos = (index * mlen) % 64;
    uint64_t old = ((uint64_t *)v0)[idx];
    ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value);
}

static inline int vext_elem_mask(void *v0, int mlen, int index)
{
    int idx = (index * mlen) / 64;
    int pos = (index * mlen) % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}
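/*
 * For example, with mlen = 8 the mask bit for element i lives at bit
 * (i * 8) % 64 of 64-bit word i / 8, so element 9 is bit 8 of word 1.
 * vext_set_elem_mask deposits mlen bits per element, but only the
 * lowest one is significant when read back by vext_elem_mask.
 */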
/* elements operations for load and store */
typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
                               uint32_t idx, void *vd, uintptr_t retaddr);
typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot);

#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF)     \
static void NAME(CPURISCVState *env, abi_ptr addr,         \
                 uint32_t idx, void *vd, uintptr_t retaddr)\
{                                                          \
    MTYPE data;                                            \
    ETYPE *cur = ((ETYPE *)vd + H(idx));                   \
    data = cpu_##LDSUF##_data_ra(env, addr, retaddr);      \
    *cur = data;                                           \
}
GEN_VEXT_LD_ELEM(ldb_b,  int8_t,   int8_t,   H1, ldsb)
GEN_VEXT_LD_ELEM(ldb_h,  int8_t,   int16_t,  H2, ldsb)
GEN_VEXT_LD_ELEM(ldb_w,  int8_t,   int32_t,  H4, ldsb)
GEN_VEXT_LD_ELEM(ldb_d,  int8_t,   int64_t,  H8, ldsb)
GEN_VEXT_LD_ELEM(ldh_h,  int16_t,  int16_t,  H2, ldsw)
GEN_VEXT_LD_ELEM(ldh_w,  int16_t,  int32_t,  H4, ldsw)
GEN_VEXT_LD_ELEM(ldh_d,  int16_t,  int64_t,  H8, ldsw)
GEN_VEXT_LD_ELEM(ldw_w,  int32_t,  int32_t,  H4, ldl)
GEN_VEXT_LD_ELEM(ldw_d,  int32_t,  int64_t,  H8, ldl)
GEN_VEXT_LD_ELEM(lde_b,  int8_t,   int8_t,   H1, ldsb)
GEN_VEXT_LD_ELEM(lde_h,  int16_t,  int16_t,  H2, ldsw)
GEN_VEXT_LD_ELEM(lde_w,  int32_t,  int32_t,  H4, ldl)
GEN_VEXT_LD_ELEM(lde_d,  int64_t,  int64_t,  H8, ldq)
GEN_VEXT_LD_ELEM(ldbu_b, uint8_t,  uint8_t,  H1, ldub)
GEN_VEXT_LD_ELEM(ldbu_h, uint8_t,  uint16_t, H2, ldub)
GEN_VEXT_LD_ELEM(ldbu_w, uint8_t,  uint32_t, H4, ldub)
GEN_VEXT_LD_ELEM(ldbu_d, uint8_t,  uint64_t, H8, ldub)
GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw)
GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw)
GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw)
GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl)
GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl)
#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF)            \
static void NAME(CPURISCVState *env, abi_ptr addr,         \
                 uint32_t idx, void *vd, uintptr_t retaddr)\
{                                                          \
    ETYPE data = *((ETYPE *)vd + H(idx));                  \
    cpu_##STSUF##_data_ra(env, addr, data, retaddr);       \
}
GEN_VEXT_ST_ELEM(stb_b, int8_t,  H1, stb)
GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb)
GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb)
GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb)
GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw)
GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw)
GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl)
GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl)
GEN_VEXT_ST_ELEM(ste_b, int8_t,  H1, stb)
GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)
/*
 *** stride: access vector element from strided memory
 */
static void
vext_ldst_stride(void *vd, void *v0, target_ulong base,
                 target_ulong stride, CPURISCVState *env,
                 uint32_t desc, uint32_t vm,
                 vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
                 uint32_t esz, uint32_t msz, uintptr_t ra,
                 MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        probe_pages(env, base + stride * i, nf * msz, ra, access_type);
    }
    /* do real access */
    for (i = 0; i < env->vl; i++) {
        k = 0;
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        while (k < nf) {
            target_ulong addr = base + stride * i + k * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}
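/*
 * For example, a strided segment load with nf = 2 and msz = 4 reads the
 * element for field k of segment i at base + stride * i + k * 4 and
 * writes it back at flat index i + k * vlmax, i.e. field k lands in the
 * k-th register of the destination group.
 */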
#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)       \
void HELPER(NAME)(void *vd, void * v0, target_ulong base,               \
                  target_ulong stride, CPURISCVState *env,              \
                  uint32_t desc)                                        \
{                                                                       \
    uint32_t vm = vext_vm(desc);                                        \
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN,      \
                     CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),            \
                     GETPC(), MMU_DATA_LOAD);                           \
}
GEN_VEXT_LD_STRIDE(vlsb_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
GEN_VEXT_LD_STRIDE(vlsb_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
GEN_VEXT_LD_STRIDE(vlsb_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
GEN_VEXT_LD_STRIDE(vlsb_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
GEN_VEXT_LD_STRIDE(vlsh_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
GEN_VEXT_LD_STRIDE(vlsh_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
GEN_VEXT_LD_STRIDE(vlsh_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
GEN_VEXT_LD_STRIDE(vlsw_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
GEN_VEXT_LD_STRIDE(vlsw_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
GEN_VEXT_LD_STRIDE(vlse_v_b,  int8_t,   int8_t,   lde_b,  clearb)
GEN_VEXT_LD_STRIDE(vlse_v_h,  int16_t,  int16_t,  lde_h,  clearh)
GEN_VEXT_LD_STRIDE(vlse_v_w,  int32_t,  int32_t,  lde_w,  clearl)
GEN_VEXT_LD_STRIDE(vlse_v_d,  int64_t,  int64_t,  lde_d,  clearq)
GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN)                \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  target_ulong stride, CPURISCVState *env,              \
                  uint32_t desc)                                        \
{                                                                       \
    uint32_t vm = vext_vm(desc);                                        \
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN,     \
                     NULL, sizeof(ETYPE), sizeof(MTYPE),                \
                     GETPC(), MMU_DATA_STORE);                          \
}
GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t,  int8_t,  stb_b)
GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t,  int16_t, stb_h)
GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t,  int32_t, stb_w)
GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t,  int64_t, stb_d)
GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h)
GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w)
GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d)
GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w)
GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d)
GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t,  int8_t,  ste_b)
GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h)
GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w)
GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
/*
 *** unit-stride: access elements stored contiguously in memory
 */

/* unmasked unit-stride load and store operation */
static void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
             vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
             uint32_t esz, uint32_t msz, uintptr_t ra,
             MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    probe_pages(env, base, env->vl * nf * msz, ra, access_type);
    /* load bytes from guest memory */
    for (i = 0; i < env->vl; i++) {
        k = 0;
        while (k < nf) {
            target_ulong addr = base + (i * nf + k) * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}
/*
 * masked unit-stride load and store operation will be a special case of
 * stride, stride = NF * sizeof (MTYPE)
 */
#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)           \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
                         CPURISCVState *env, uint32_t desc)             \
{                                                                       \
    uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
    vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN,   \
                     CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),            \
                     GETPC(), MMU_DATA_LOAD);                           \
}                                                                       \
                                                                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  CPURISCVState *env, uint32_t desc)                    \
{                                                                       \
    vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN,                \
                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
}
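/*
 * For example, a masked vlh_v with nf = 1 and MTYPE = int16_t is handled
 * as a strided access with stride = 1 * sizeof(int16_t) = 2 bytes, which
 * is exactly the contiguous unit-stride layout.
 */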
GEN_VEXT_LD_US(vlb_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
GEN_VEXT_LD_US(vlb_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
GEN_VEXT_LD_US(vlb_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
GEN_VEXT_LD_US(vlb_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
GEN_VEXT_LD_US(vlh_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
GEN_VEXT_LD_US(vlh_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
GEN_VEXT_LD_US(vlh_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
GEN_VEXT_LD_US(vlw_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
GEN_VEXT_LD_US(vlw_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
GEN_VEXT_LD_US(vle_v_b,  int8_t,   int8_t,   lde_b,  clearb)
GEN_VEXT_LD_US(vle_v_h,  int16_t,  int16_t,  lde_h,  clearh)
GEN_VEXT_LD_US(vle_v_w,  int32_t,  int32_t,  lde_w,  clearl)
GEN_VEXT_LD_US(vle_v_d,  int64_t,  int64_t,  lde_d,  clearq)
GEN_VEXT_LD_US(vlbu_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
GEN_VEXT_LD_US(vlbu_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
GEN_VEXT_LD_US(vlbu_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
GEN_VEXT_LD_US(vlbu_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN)                    \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
                         CPURISCVState *env, uint32_t desc)             \
{                                                                       \
    uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
                     NULL, sizeof(ETYPE), sizeof(MTYPE),                \
                     GETPC(), MMU_DATA_STORE);                          \
}                                                                       \
                                                                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  CPURISCVState *env, uint32_t desc)                    \
{                                                                       \
    vext_ldst_us(vd, base, env, desc, STORE_FN, NULL,                   \
                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
}
GEN_VEXT_ST_US(vsb_v_b, int8_t,  int8_t,  stb_b)
GEN_VEXT_ST_US(vsb_v_h, int8_t,  int16_t, stb_h)
GEN_VEXT_ST_US(vsb_v_w, int8_t,  int32_t, stb_w)
GEN_VEXT_ST_US(vsb_v_d, int8_t,  int64_t, stb_d)
GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h)
GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w)
GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d)
GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w)
GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d)
GEN_VEXT_ST_US(vse_v_b, int8_t,  int8_t,  ste_b)
GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h)
GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w)
GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d)
/*
 *** index: access vector element from indexed memory
 */
typedef target_ulong vext_get_index_addr(target_ulong base,
                                         uint32_t idx, void *vs2);

#define GEN_VEXT_GET_INDEX_ADDR(NAME, ETYPE, H)        \
static target_ulong NAME(target_ulong base,            \
                         uint32_t idx, void *vs2)      \
{                                                      \
    return (base + *((ETYPE *)vs2 + H(idx)));          \
}

GEN_VEXT_GET_INDEX_ADDR(idx_b, int8_t,  H1)
GEN_VEXT_GET_INDEX_ADDR(idx_h, int16_t, H2)
GEN_VEXT_GET_INDEX_ADDR(idx_w, int32_t, H4)
GEN_VEXT_GET_INDEX_ADDR(idx_d, int64_t, H8)
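/*
 * For example, idx_h(base, i, vs2) reads the i-th int16_t offset from the
 * index register group vs2 and returns base plus that offset; since the
 * element type is signed, negative 16-bit indices address below base.
 */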
static inline void
vext_ldst_index(void *vd, void *v0, target_ulong base,
                void *vs2, CPURISCVState *env, uint32_t desc,
                vext_get_index_addr get_index_addr,
                vext_ldst_elem_fn *ldst_elem,
                clear_fn *clear_elem,
                uint32_t esz, uint32_t msz, uintptr_t ra,
                MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
                    access_type);
    }
    /* load bytes from guest memory */
    for (i = 0; i < env->vl; i++) {
        k = 0;
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        while (k < nf) {
            abi_ptr addr = get_index_addr(base, i, vs2) + k * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}
#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                   \
                  void *vs2, CPURISCVState *env, uint32_t desc)            \
{                                                                          \
    vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,                \
                    LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),       \
                    GETPC(), MMU_DATA_LOAD);                               \
}
GEN_VEXT_LD_INDEX(vlxb_v_b,  int8_t,   int8_t,   idx_b, ldb_b,  clearb)
GEN_VEXT_LD_INDEX(vlxb_v_h,  int8_t,   int16_t,  idx_h, ldb_h,  clearh)
GEN_VEXT_LD_INDEX(vlxb_v_w,  int8_t,   int32_t,  idx_w, ldb_w,  clearl)
GEN_VEXT_LD_INDEX(vlxb_v_d,  int8_t,   int64_t,  idx_d, ldb_d,  clearq)
GEN_VEXT_LD_INDEX(vlxh_v_h,  int16_t,  int16_t,  idx_h, ldh_h,  clearh)
GEN_VEXT_LD_INDEX(vlxh_v_w,  int16_t,  int32_t,  idx_w, ldh_w,  clearl)
GEN_VEXT_LD_INDEX(vlxh_v_d,  int16_t,  int64_t,  idx_d, ldh_d,  clearq)
GEN_VEXT_LD_INDEX(vlxw_v_w,  int32_t,  int32_t,  idx_w, ldw_w,  clearl)
GEN_VEXT_LD_INDEX(vlxw_v_d,  int32_t,  int64_t,  idx_d, ldw_d,  clearq)
GEN_VEXT_LD_INDEX(vlxe_v_b,  int8_t,   int8_t,   idx_b, lde_b,  clearb)
GEN_VEXT_LD_INDEX(vlxe_v_h,  int16_t,  int16_t,  idx_h, lde_h,  clearh)
GEN_VEXT_LD_INDEX(vlxe_v_w,  int32_t,  int32_t,  idx_w, lde_w,  clearl)
GEN_VEXT_LD_INDEX(vlxe_v_d,  int64_t,  int64_t,  idx_d, lde_d,  clearq)
GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t,  uint8_t,  idx_b, ldbu_b, clearb)
GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t,  uint16_t, idx_h, ldbu_h, clearh)
GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t,  uint32_t, idx_w, ldbu_w, clearl)
GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t,  uint64_t, idx_d, ldbu_d, clearq)
GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh)
GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl)
GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq)
GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl)
GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq)
#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
                  void *vs2, CPURISCVState *env, uint32_t desc)  \
{                                                                \
    vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,      \
                    STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\
                    GETPC(), MMU_DATA_STORE);                    \
}
GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t,  int8_t,  idx_b, stb_b)
GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t,  int16_t, idx_h, stb_h)
GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t,  int32_t, idx_w, stb_w)
GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t,  int64_t, idx_d, stb_d)
GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h)
GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w)
GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d)
GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w)
GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d)
GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t,  int8_t,  idx_b, ste_b)
GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h)
GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w)
GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d)
/*
 *** unit-stride fault-only-first load instructions
 */
static inline void
vext_ldff(void *vd, void *v0, target_ulong base,
          CPURISCVState *env, uint32_t desc,
          vext_ldst_elem_fn *ldst_elem,
          clear_fn *clear_elem,
          uint32_t esz, uint32_t msz, uintptr_t ra)
{
    void *host;
    uint32_t i, k, vl = 0;
    uint32_t mlen = vext_mlen(desc);
    uint32_t nf = vext_nf(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;
    target_ulong addr, offset, remain;

    /* probe every access */
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        addr = base + nf * i * msz;
        if (i == 0) {
            probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
        } else {
            /* if it triggers an exception, no need to check watchpoint */
            remain = nf * msz;
            while (remain > 0) {
                offset = -(addr | TARGET_PAGE_MASK);
                host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
                                         cpu_mmu_index(env, false));
                if (host) {
#ifdef CONFIG_USER_ONLY
                    if (page_check_range(addr, nf * msz, PAGE_READ) < 0) {
                        vl = i;
                        goto ProbeSuccess;
                    }
#else
                    probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
#endif
                } else {
                    vl = i;
                    goto ProbeSuccess;
                }
                if (remain <= offset) {
                    break;
                }
                remain -= offset;
                addr += offset;
            }
        }
    }
ProbeSuccess:
    /* load bytes from guest memory */
    if (vl != 0) {
        env->vl = vl;
    }
    for (i = 0; i < env->vl; i++) {
        k = 0;
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        while (k < nf) {
            target_ulong addr = base + (i * nf + k) * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (vl != 0) {
        return;
    }
    for (k = 0; k < nf; k++) {
        clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
    }
}
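/*
 * For example, if element 0 loads fine but the page holding element 3
 * is unmapped, the probe loop sets vl = 3 and jumps to ProbeSuccess, so
 * env->vl is truncated to 3 and only elements 0..2 are written back;
 * no exception is raised for faults after the first element.
 */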
#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)  \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,      \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN,     \
              sizeof(ETYPE), sizeof(MTYPE), GETPC());         \
}
GEN_VEXT_LDFF(vlbff_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
GEN_VEXT_LDFF(vlbff_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
GEN_VEXT_LDFF(vlbff_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
GEN_VEXT_LDFF(vlbff_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
GEN_VEXT_LDFF(vlhff_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
GEN_VEXT_LDFF(vlhff_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
GEN_VEXT_LDFF(vlhff_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
GEN_VEXT_LDFF(vlwff_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
GEN_VEXT_LDFF(vlwff_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
GEN_VEXT_LDFF(vleff_v_b,  int8_t,   int8_t,   lde_b,  clearb)
GEN_VEXT_LDFF(vleff_v_h,  int16_t,  int16_t,  lde_h,  clearh)
GEN_VEXT_LDFF(vleff_v_w,  int32_t,  int32_t,  lde_w,  clearl)
GEN_VEXT_LDFF(vleff_v_d,  int64_t,  int64_t,  lde_d,  clearq)
GEN_VEXT_LDFF(vlbuff_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
GEN_VEXT_LDFF(vlbuff_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
GEN_VEXT_LDFF(vlbuff_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
GEN_VEXT_LDFF(vlbuff_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh)
GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl)
GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq)
GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl)
GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq)
/*
 *** Vector AMO Operations (Zvamo)
 */
typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr,
                                  uint32_t wd, uint32_t idx,
                                  CPURISCVState *env, uintptr_t retaddr);

/* no atomic operation for vector atomic instructions */
#define DO_SWAP(N, M) (M)
#define DO_AND(N, M)  (N & M)
#define DO_XOR(N, M)  (N ^ M)
#define DO_OR(N, M)   (N | M)
#define DO_ADD(N, M)  (N + M)

#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \
static void                                                     \
vext_##NAME##_noatomic_op(void *vs3, target_ulong addr,         \
                          uint32_t wd, uint32_t idx,            \
                          CPURISCVState *env, uintptr_t retaddr)\
{                                                               \
    typedef int##ESZ##_t ETYPE;                                 \
    typedef int##MSZ##_t MTYPE;                                 \
    typedef uint##MSZ##_t UMTYPE __attribute__((unused));       \
    ETYPE *pe3 = (ETYPE *)vs3 + H(idx);                         \
    MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3;          \
                                                                \
    cpu_st##SUF##_data(env, addr, DO_OP(a, b));                 \
    if (wd) {                                                   \
        *pe3 = a;                                               \
    }                                                           \
}
/* Signed min/max */
#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

/* Unsigned min/max */
#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
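/*
 * For example, DO_MAXU(-1, 1) with MSZ = 32 casts both operands to
 * uint32_t, so -1 becomes 0xffffffff and wins the unsigned comparison;
 * the UMTYPE typedef inside each generated op supplies the right width.
 */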
GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w,  32, 32, H4, DO_ADD,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w,  32, 32, H4, DO_XOR,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w,  32, 32, H4, DO_AND,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w,   32, 32, H4, DO_OR,   l)
GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w,  32, 32, H4, DO_MIN,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w,  32, 32, H4, DO_MAX,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l)
GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l)
#ifdef TARGET_RISCV64
GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q)
GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d,  64, 32, H8, DO_ADD,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d,  64, 64, H8, DO_ADD,  q)
GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d,  64, 32, H8, DO_XOR,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d,  64, 64, H8, DO_XOR,  q)
GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d,  64, 32, H8, DO_AND,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d,  64, 64, H8, DO_AND,  q)
GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d,   64, 32, H8, DO_OR,   l)
GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d,   64, 64, H8, DO_OR,   q)
GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d,  64, 32, H8, DO_MIN,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d,  64, 64, H8, DO_MIN,  q)
GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d,  64, 32, H8, DO_MAX,  l)
GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d,  64, 64, H8, DO_MAX,  q)
GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l)
GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q)
GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l)
GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q)
#endif
static inline void
vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
                  void *vs2, CPURISCVState *env, uint32_t desc,
                  vext_get_index_addr get_index_addr,
                  vext_amo_noatomic_fn *noatomic_op,
                  clear_fn *clear_elem,
                  uint32_t esz, uint32_t msz, uintptr_t ra)
{
    uint32_t i;
    target_long addr;
    uint32_t wd = vext_wd(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
    }
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        addr = get_index_addr(base, i, vs2);
        noatomic_op(vs3, addr, wd, i, env, ra);
    }
    clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz);
}
#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN)    \
void HELPER(NAME)(void *vs3, void *v0, target_ulong base,       \
                  void *vs2, CPURISCVState *env, uint32_t desc) \
{                                                               \
    vext_amo_noatomic(vs3, v0, base, vs2, env, desc,            \
                      INDEX_FN, vext_##NAME##_noatomic_op,      \
                      CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),   \
                      GETPC());                                 \
}

#ifdef TARGET_RISCV64
GEN_VEXT_AMO(vamoswapw_v_d, int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoswapd_v_d, int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoaddw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoaddd_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoxorw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoxord_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoandw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoandd_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoorw_v_d,   int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamoord_v_d,   int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamominw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamomind_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamomaxw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamomaxd_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
#endif
GEN_VEXT_AMO(vamoswapw_v_w, int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoaddw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoxorw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoandw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoorw_v_w,   int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamominw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamomaxw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
/*
 *** Vector Integer Arithmetic Instructions
 */

/* expand macro args before macro */
#define RVVCALL(macro, ...)  macro(__VA_ARGS__)

/* (TD, T1, T2, TX1, TX2) */
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t
#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t
#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t
#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);

#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)    \
{                                                               \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                             \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                             \
    *((TD *)vd + HD(i)) = OP(s2, s1);                           \
}
#define DO_SUB(N, M) (N - M)
#define DO_RSUB(N, M) (M - N)
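/*
 * For example, RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD)
 * first expands OP_SSS_B into its five type arguments and then generates:
 *
 *   static void do_vadd_vv_b(void *vd, void *vs1, void *vs2, int i)
 *   {
 *       int8_t s1 = *((int8_t *)vs1 + H1(i));
 *       int8_t s2 = *((int8_t *)vs2 + H1(i));
 *       *((int8_t *)vd + H1(i)) = (s2 + s1);
 *   }
 */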
RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vadd_vv_h, OP_SSS_H, H2, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vadd_vv_w, OP_SSS_W, H4, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vadd_vv_d, OP_SSS_D, H8, H8, H8, DO_ADD)
RVVCALL(OPIVV2, vsub_vv_b, OP_SSS_B, H1, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
                       CPURISCVState *env, uint32_t desc,
                       uint32_t esz, uint32_t dsz,
                       opivv2_fn *fn, clear_fn *clearfn)
{
    uint32_t vlmax = vext_maxsz(desc) / esz;
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    uint32_t i;

    for (i = 0; i < vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        fn(vd, vs1, vs2, i);
    }
    clearfn(vd, vl, vl * dsz, vlmax * dsz);
}
/* generate the helpers for OPIVV */
#define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN)             \
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ,     \
               do_##NAME, CLEAR_FN);                      \
}
GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vadd_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq)
typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);

/*
 * (T1)s1 gives the real operator type.
 * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
 */
#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1);                      \
}
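/*
 * For example, in a widening op with T1 = int8_t and TX1 = int16_t, the
 * scalar arrives as a target_long; (int8_t)s1 truncates it to the real
 * operand width and (int16_t)(int8_t)s1 then sign-extends it to the
 * computation width, so only the low 8 bits of s1 are significant.
 */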
RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD)
RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD)
RVVCALL(OPIVX2, vadd_vx_w, OP_SSS_W, H4, H4, DO_ADD)
RVVCALL(OPIVX2, vadd_vx_d, OP_SSS_D, H8, H8, DO_ADD)
RVVCALL(OPIVX2, vsub_vx_b, OP_SSS_B, H1, H1, DO_SUB)
RVVCALL(OPIVX2, vsub_vx_h, OP_SSS_H, H2, H2, DO_SUB)
RVVCALL(OPIVX2, vsub_vx_w, OP_SSS_W, H4, H4, DO_SUB)
RVVCALL(OPIVX2, vsub_vx_d, OP_SSS_D, H8, H8, DO_SUB)
RVVCALL(OPIVX2, vrsub_vx_b, OP_SSS_B, H1, H1, DO_RSUB)
RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB)
RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB)
RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
                       CPURISCVState *env, uint32_t desc,
                       uint32_t esz, uint32_t dsz,
                       opivx2_fn fn, clear_fn *clearfn)
{
    uint32_t vlmax = vext_maxsz(desc) / esz;
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    uint32_t i;

    for (i = 0; i < vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        fn(vd, s1, vs2, i);
    }
    clearfn(vd, vl, vl * dsz, vlmax * dsz);
}
/* generate the helpers for OPIVX */
#define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN)             \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ,      \
               do_##NAME, CLEAR_FN);                      \
}
GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq)
void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
        *(uint8_t *)(d + i) = (uint8_t)b - *(uint8_t *)(a + i);
    }
}

void HELPER(vec_rsubs16)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
        *(uint16_t *)(d + i) = (uint16_t)b - *(uint16_t *)(a + i);
    }
}

void HELPER(vec_rsubs32)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
        *(uint32_t *)(d + i) = (uint32_t)b - *(uint32_t *)(a + i);
    }
}

void HELPER(vec_rsubs64)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
        *(uint64_t *)(d + i) = b - *(uint64_t *)(a + i);
    }
}
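/*
 * These follow the tcg/gvec helper shape (flat oprsz loop, no mask),
 * presumably so reverse-subtract with a scalar can be expanded through
 * the generic gvec path when the whole register group is active.
 */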
/* Vector Widening Integer Add/Subtract */
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
#define WOP_WUUU_B uint16_t, uint8_t, uint16_t, uint16_t, uint16_t
#define WOP_WUUU_H uint32_t, uint16_t, uint32_t, uint32_t, uint32_t
#define WOP_WUUU_W uint64_t, uint32_t, uint64_t, uint64_t, uint64_t
#define WOP_WSSS_B int16_t, int8_t, int16_t, int16_t, int16_t
#define WOP_WSSS_H int32_t, int16_t, int32_t, int32_t, int32_t
#define WOP_WSSS_W int64_t, int32_t, int64_t, int64_t, int64_t
RVVCALL(OPIVV2, vwaddu_vv_b, WOP_UUU_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwaddu_vv_h, WOP_UUU_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwaddu_vv_w, WOP_UUU_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsubu_vv_b, WOP_UUU_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsubu_vv_h, WOP_UUU_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsubu_vv_w, WOP_UUU_W, H8, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vwadd_vv_b, WOP_SSS_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwadd_vv_h, WOP_SSS_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwadd_vv_w, WOP_SSS_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsub_vv_b, WOP_SSS_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsub_vv_h, WOP_SSS_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsub_vv_w, WOP_SSS_W, H8, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vwaddu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwaddu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwaddu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsubu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsubu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsubu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vwadd_wv_b, WOP_WSSS_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwadd_wv_h, WOP_WSSS_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB)
GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh)
GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl)
GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq)
GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh)
GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl)
GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq)
GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh)
GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl)
GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq)
GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh)
GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl)
GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq)
RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwaddu_vx_w, WOP_UUU_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsubu_vx_b, WOP_UUU_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsubu_vx_h, WOP_UUU_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsubu_vx_w, WOP_UUU_W, H8, H4, DO_SUB)
RVVCALL(OPIVX2, vwadd_vx_b, WOP_SSS_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwadd_vx_h, WOP_SSS_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwadd_vx_w, WOP_SSS_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsub_vx_b, WOP_SSS_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsub_vx_h, WOP_SSS_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsub_vx_w, WOP_SSS_W, H8, H4, DO_SUB)
RVVCALL(OPIVX2, vwaddu_wx_b, WOP_WUUU_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwaddu_wx_h, WOP_WUUU_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwaddu_wx_w, WOP_WUUU_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsubu_wx_b, WOP_WUUU_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsubu_wx_h, WOP_WUUU_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsubu_wx_w, WOP_WUUU_W, H8, H4, DO_SUB)
RVVCALL(OPIVX2, vwadd_wx_b, WOP_WSSS_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwadd_wx_h, WOP_WSSS_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB)
GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh)
GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl)
GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq)
GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh)
GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl)
GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq)
GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh)
GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl)
GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq)
GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh)
GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl)
GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq)
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
#define DO_VADC(N, M, C) (N + M + C)
#define DO_VSBC(N, M, C) (N - M - C)

#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN)    \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vl = env->vl;                                    \
    uint32_t esz = sizeof(ETYPE);                             \
    uint32_t vlmax = vext_maxsz(desc) / esz;                  \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        uint8_t carry = vext_elem_mask(v0, mlen, i);          \
                                                              \
        *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry);         \
    }                                                         \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                  \
}
GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t,  H1, DO_VADC, clearb)
GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh)
GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl)
GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq)

GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t,  H1, DO_VSBC, clearb)
GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh)
GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl)
GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq)
#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN)               \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
                  CPURISCVState *env, uint32_t desc)                     \
{                                                                        \
    uint32_t mlen = vext_mlen(desc);                                     \
    uint32_t vl = env->vl;                                               \
    uint32_t esz = sizeof(ETYPE);                                        \
    uint32_t vlmax = vext_maxsz(desc) / esz;                             \
    uint32_t i;                                                          \
                                                                         \
    for (i = 0; i < vl; i++) {                                           \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                               \
        uint8_t carry = vext_elem_mask(v0, mlen, i);                     \
                                                                         \
        *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
    }                                                                    \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                             \
}
GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t,  H1, DO_VADC, clearb)
GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh)
GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC, clearl)
GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq)

GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t,  H1, DO_VSBC, clearb)
GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh)
GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl)
GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq)
#define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N :           \
                              (__typeof(N))(N + M) < N)
#define DO_MSBC(N, M, C) (C ? N <= M : N < M)
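/*
 * For example, DO_MADC computes the unsigned carry-out of N + M + C:
 * with 8-bit operands, 200 + 100 wraps to 44, and 44 < 200 yields a
 * carry of 1; the C ? ... <= N branch covers the extra +1 case where
 * N + M + 1 can wrap exactly back to N.
 */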
#define GEN_VEXT_VMADC_VVM(NAME, ETYPE, H, DO_OP)             \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vl = env->vl;                                    \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        uint8_t carry = vext_elem_mask(v0, mlen, i);          \
                                                              \
        vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\
    }                                                         \
    for (; i < vlmax; i++) {                                  \
        vext_set_elem_mask(vd, mlen, i, 0);                   \
    }                                                         \
}
GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t,  H1, DO_MADC)
GEN_VEXT_VMADC_VVM(vmadc_vvm_h, uint16_t, H2, DO_MADC)
GEN_VEXT_VMADC_VVM(vmadc_vvm_w, uint32_t, H4, DO_MADC)
GEN_VEXT_VMADC_VVM(vmadc_vvm_d, uint64_t, H8, DO_MADC)

GEN_VEXT_VMADC_VVM(vmsbc_vvm_b, uint8_t,  H1, DO_MSBC)
GEN_VEXT_VMADC_VVM(vmsbc_vvm_h, uint16_t, H2, DO_MSBC)
GEN_VEXT_VMADC_VVM(vmsbc_vvm_w, uint32_t, H4, DO_MSBC)
GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC)
#define GEN_VEXT_VMADC_VXM(NAME, ETYPE, H, DO_OP)             \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,        \
                  void *vs2, CPURISCVState *env, uint32_t desc) \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vl = env->vl;                                    \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        uint8_t carry = vext_elem_mask(v0, mlen, i);          \
                                                              \
        vext_set_elem_mask(vd, mlen, i,                       \
                DO_OP(s2, (ETYPE)(target_long)s1, carry));    \
    }                                                         \
    for (; i < vlmax; i++) {                                  \
        vext_set_elem_mask(vd, mlen, i, 0);                   \
    }                                                         \
}
GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t,  H1, DO_MADC)
GEN_VEXT_VMADC_VXM(vmadc_vxm_h, uint16_t, H2, DO_MADC)
GEN_VEXT_VMADC_VXM(vmadc_vxm_w, uint32_t, H4, DO_MADC)
GEN_VEXT_VMADC_VXM(vmadc_vxm_d, uint64_t, H8, DO_MADC)

GEN_VEXT_VMADC_VXM(vmsbc_vxm_b, uint8_t,  H1, DO_MSBC)
GEN_VEXT_VMADC_VXM(vmsbc_vxm_h, uint16_t, H2, DO_MSBC)
GEN_VEXT_VMADC_VXM(vmsbc_vxm_w, uint32_t, H4, DO_MSBC)
GEN_VEXT_VMADC_VXM(vmsbc_vxm_d, uint64_t, H8, DO_MSBC)
/* Vector Bitwise Logical Instructions */
RVVCALL(OPIVV2, vand_vv_b, OP_SSS_B, H1, H1, H1, DO_AND)
RVVCALL(OPIVV2, vand_vv_h, OP_SSS_H, H2, H2, H2, DO_AND)
RVVCALL(OPIVV2, vand_vv_w, OP_SSS_W, H4, H4, H4, DO_AND)
RVVCALL(OPIVV2, vand_vv_d, OP_SSS_D, H8, H8, H8, DO_AND)
RVVCALL(OPIVV2, vor_vv_b, OP_SSS_B, H1, H1, H1, DO_OR)
RVVCALL(OPIVV2, vor_vv_h, OP_SSS_H, H2, H2, H2, DO_OR)
RVVCALL(OPIVV2, vor_vv_w, OP_SSS_W, H4, H4, H4, DO_OR)
RVVCALL(OPIVV2, vor_vv_d, OP_SSS_D, H8, H8, H8, DO_OR)
RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR)
RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR)
RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR)
RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR)
GEN_VEXT_VV(vand_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vand_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vand_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vand_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vor_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vor_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vor_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vor_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq)
RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND)
RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND)
RVVCALL(OPIVX2, vand_vx_w, OP_SSS_W, H4, H4, DO_AND)
RVVCALL(OPIVX2, vand_vx_d, OP_SSS_D, H8, H8, DO_AND)
RVVCALL(OPIVX2, vor_vx_b, OP_SSS_B, H1, H1, DO_OR)
RVVCALL(OPIVX2, vor_vx_h, OP_SSS_H, H2, H2, DO_OR)
RVVCALL(OPIVX2, vor_vx_w, OP_SSS_W, H4, H4, DO_OR)
RVVCALL(OPIVX2, vor_vx_d, OP_SSS_D, H8, H8, DO_OR)
RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR)
RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR)
RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR)
RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR)
GEN_VEXT_VX(vand_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vand_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vand_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vand_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vor_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vor_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vor_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vor_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)
/* Vector Single-Width Bit Shift Instructions */
#define DO_SLL(N, M)  (N << (M))
#define DO_SRL(N, M)  (N >> (M))

/* generate the helpers for shift instructions with two vector operands */
#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN) \
void HELPER(NAME)(void *vd, void *v0, void *vs1,                        \
                  void *vs2, CPURISCVState *env, uint32_t desc)         \
{                                                                       \
    uint32_t mlen = vext_mlen(desc);                                    \
    uint32_t vm = vext_vm(desc);                                        \
    uint32_t vl = env->vl;                                              \
    uint32_t esz = sizeof(TS1);                                         \
    uint32_t vlmax = vext_maxsz(desc) / esz;                            \
    uint32_t i;                                                         \
                                                                        \
    for (i = 0; i < vl; i++) {                                          \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                      \
            continue;                                                   \
        }                                                               \
        TS1 s1 = *((TS1 *)vs1 + HS1(i));                                \
        TS2 s2 = *((TS2 *)vs2 + HS2(i));                                \
        *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK);                      \
    }                                                                   \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                            \
}
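/*
 * For example, vsra_vv_b is generated with TS1 = uint8_t, TS2 = int8_t
 * and MASK = 0x7: the shift amount is masked to 3 bits, and because s2
 * is signed, DO_SRL on it performs an arithmetic right shift.
 */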
GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t,  uint8_t, H1, H1, DO_SLL, 0x7, clearb)
GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh)
GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl)
GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq)

GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)

GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t,  int8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
/* generate the helpers for shift instructions with one vector and one scalar */
#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,                \
                  void *vs2, CPURISCVState *env, uint32_t desc)       \
{                                                                     \
    uint32_t mlen = vext_mlen(desc);                                  \
    uint32_t vm = vext_vm(desc);                                      \
    uint32_t vl = env->vl;                                            \
    uint32_t esz = sizeof(TD);                                        \
    uint32_t vlmax = vext_maxsz(desc) / esz;                          \
    uint32_t i;                                                       \
                                                                      \
    for (i = 0; i < vl; i++) {                                        \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                    \
            continue;                                                 \
        }                                                             \
        TS2 s2 = *((TS2 *)vs2 + HS2(i));                              \
        *((TD *)vd + HD(i)) = OP(s2, s1 & MASK);                      \
    }                                                                 \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                          \
}
GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb)
GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh)
GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq)

GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)

GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
/* Vector Narrowing Integer Right Shift Instructions */
GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
/* Vector Integer Comparison Instructions */
#define DO_MSEQ(N, M) (N == M)
#define DO_MSNE(N, M) (N != M)
#define DO_MSLT(N, M) (N < M)
#define DO_MSLE(N, M) (N <= M)
#define DO_MSGT(N, M) (N > M)

#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP)                \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vm = vext_vm(desc);                              \
    uint32_t vl = env->vl;                                    \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {            \
            continue;                                         \
        }                                                     \
        vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1));       \
    }                                                         \
    for (; i < vlmax; i++) {                                  \
        vext_set_elem_mask(vd, mlen, i, 0);                   \
    }                                                         \
}
GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t,  H1, DO_MSEQ)
GEN_VEXT_CMP_VV(vmseq_vv_h, uint16_t, H2, DO_MSEQ)
GEN_VEXT_CMP_VV(vmseq_vv_w, uint32_t, H4, DO_MSEQ)
GEN_VEXT_CMP_VV(vmseq_vv_d, uint64_t, H8, DO_MSEQ)

GEN_VEXT_CMP_VV(vmsne_vv_b, uint8_t,  H1, DO_MSNE)
GEN_VEXT_CMP_VV(vmsne_vv_h, uint16_t, H2, DO_MSNE)
GEN_VEXT_CMP_VV(vmsne_vv_w, uint32_t, H4, DO_MSNE)
GEN_VEXT_CMP_VV(vmsne_vv_d, uint64_t, H8, DO_MSNE)

GEN_VEXT_CMP_VV(vmsltu_vv_b, uint8_t,  H1, DO_MSLT)
GEN_VEXT_CMP_VV(vmsltu_vv_h, uint16_t, H2, DO_MSLT)
GEN_VEXT_CMP_VV(vmsltu_vv_w, uint32_t, H4, DO_MSLT)
GEN_VEXT_CMP_VV(vmsltu_vv_d, uint64_t, H8, DO_MSLT)

GEN_VEXT_CMP_VV(vmslt_vv_b, int8_t,  H1, DO_MSLT)
GEN_VEXT_CMP_VV(vmslt_vv_h, int16_t, H2, DO_MSLT)
GEN_VEXT_CMP_VV(vmslt_vv_w, int32_t, H4, DO_MSLT)
GEN_VEXT_CMP_VV(vmslt_vv_d, int64_t, H8, DO_MSLT)

GEN_VEXT_CMP_VV(vmsleu_vv_b, uint8_t,  H1, DO_MSLE)
GEN_VEXT_CMP_VV(vmsleu_vv_h, uint16_t, H2, DO_MSLE)
GEN_VEXT_CMP_VV(vmsleu_vv_w, uint32_t, H4, DO_MSLE)
GEN_VEXT_CMP_VV(vmsleu_vv_d, uint64_t, H8, DO_MSLE)

GEN_VEXT_CMP_VV(vmsle_vv_b, int8_t,  H1, DO_MSLE)
GEN_VEXT_CMP_VV(vmsle_vv_h, int16_t, H2, DO_MSLE)
GEN_VEXT_CMP_VV(vmsle_vv_w, int32_t, H4, DO_MSLE)
GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE)

#define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP)                      \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)                \
{                                                                   \
    uint32_t mlen = vext_mlen(desc);                                \
    uint32_t vm = vext_vm(desc);                                    \
    uint32_t vl = env->vl;                                          \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);              \
    uint32_t i;                                                     \
                                                                    \
    for (i = 0; i < vl; i++) {                                      \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                          \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                  \
            continue;                                               \
        }                                                           \
        vext_set_elem_mask(vd, mlen, i,                             \
                           DO_OP(s2, (ETYPE)(target_long)s1));      \
    }                                                               \
    for (; i < vlmax; i++) {                                        \
        vext_set_elem_mask(vd, mlen, i, 0);                         \
    }                                                               \
}
GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t,  H1, DO_MSEQ)
GEN_VEXT_CMP_VX(vmseq_vx_h, uint16_t, H2, DO_MSEQ)
GEN_VEXT_CMP_VX(vmseq_vx_w, uint32_t, H4, DO_MSEQ)
GEN_VEXT_CMP_VX(vmseq_vx_d, uint64_t, H8, DO_MSEQ)

GEN_VEXT_CMP_VX(vmsne_vx_b, uint8_t,  H1, DO_MSNE)
GEN_VEXT_CMP_VX(vmsne_vx_h, uint16_t, H2, DO_MSNE)
GEN_VEXT_CMP_VX(vmsne_vx_w, uint32_t, H4, DO_MSNE)
GEN_VEXT_CMP_VX(vmsne_vx_d, uint64_t, H8, DO_MSNE)

GEN_VEXT_CMP_VX(vmsltu_vx_b, uint8_t,  H1, DO_MSLT)
GEN_VEXT_CMP_VX(vmsltu_vx_h, uint16_t, H2, DO_MSLT)
GEN_VEXT_CMP_VX(vmsltu_vx_w, uint32_t, H4, DO_MSLT)
GEN_VEXT_CMP_VX(vmsltu_vx_d, uint64_t, H8, DO_MSLT)

GEN_VEXT_CMP_VX(vmslt_vx_b, int8_t,  H1, DO_MSLT)
GEN_VEXT_CMP_VX(vmslt_vx_h, int16_t, H2, DO_MSLT)
GEN_VEXT_CMP_VX(vmslt_vx_w, int32_t, H4, DO_MSLT)
GEN_VEXT_CMP_VX(vmslt_vx_d, int64_t, H8, DO_MSLT)

GEN_VEXT_CMP_VX(vmsleu_vx_b, uint8_t,  H1, DO_MSLE)
GEN_VEXT_CMP_VX(vmsleu_vx_h, uint16_t, H2, DO_MSLE)
GEN_VEXT_CMP_VX(vmsleu_vx_w, uint32_t, H4, DO_MSLE)
GEN_VEXT_CMP_VX(vmsleu_vx_d, uint64_t, H8, DO_MSLE)

GEN_VEXT_CMP_VX(vmsle_vx_b, int8_t,  H1, DO_MSLE)
GEN_VEXT_CMP_VX(vmsle_vx_h, int16_t, H2, DO_MSLE)
GEN_VEXT_CMP_VX(vmsle_vx_w, int32_t, H4, DO_MSLE)
GEN_VEXT_CMP_VX(vmsle_vx_d, int64_t, H8, DO_MSLE)

GEN_VEXT_CMP_VX(vmsgtu_vx_b, uint8_t,  H1, DO_MSGT)
GEN_VEXT_CMP_VX(vmsgtu_vx_h, uint16_t, H2, DO_MSGT)
GEN_VEXT_CMP_VX(vmsgtu_vx_w, uint32_t, H4, DO_MSGT)
GEN_VEXT_CMP_VX(vmsgtu_vx_d, uint64_t, H8, DO_MSGT)

GEN_VEXT_CMP_VX(vmsgt_vx_b, int8_t,  H1, DO_MSGT)
GEN_VEXT_CMP_VX(vmsgt_vx_h, int16_t, H2, DO_MSGT)
GEN_VEXT_CMP_VX(vmsgt_vx_w, int32_t, H4, DO_MSGT)
GEN_VEXT_CMP_VX(vmsgt_vx_d, int64_t, H8, DO_MSGT)
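
/*
 * Comparisons write a mask, not data: each active element produces a
 * single bit via vext_set_elem_mask(), inactive elements keep their
 * old bit (the "continue"), and the tail [vl, vlmax) is zeroed because
 * a mask destination always has vlmax accessible bits.
 */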

/* Vector Integer Min/Max Instructions */
RVVCALL(OPIVV2, vminu_vv_b, OP_UUU_B, H1, H1, H1, DO_MIN)
RVVCALL(OPIVV2, vminu_vv_h, OP_UUU_H, H2, H2, H2, DO_MIN)
RVVCALL(OPIVV2, vminu_vv_w, OP_UUU_W, H4, H4, H4, DO_MIN)
RVVCALL(OPIVV2, vminu_vv_d, OP_UUU_D, H8, H8, H8, DO_MIN)
RVVCALL(OPIVV2, vmin_vv_b, OP_SSS_B, H1, H1, H1, DO_MIN)
RVVCALL(OPIVV2, vmin_vv_h, OP_SSS_H, H2, H2, H2, DO_MIN)
RVVCALL(OPIVV2, vmin_vv_w, OP_SSS_W, H4, H4, H4, DO_MIN)
RVVCALL(OPIVV2, vmin_vv_d, OP_SSS_D, H8, H8, H8, DO_MIN)
RVVCALL(OPIVV2, vmaxu_vv_b, OP_UUU_B, H1, H1, H1, DO_MAX)
RVVCALL(OPIVV2, vmaxu_vv_h, OP_UUU_H, H2, H2, H2, DO_MAX)
RVVCALL(OPIVV2, vmaxu_vv_w, OP_UUU_W, H4, H4, H4, DO_MAX)
RVVCALL(OPIVV2, vmaxu_vv_d, OP_UUU_D, H8, H8, H8, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX)
RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX)
GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN)
RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN)
RVVCALL(OPIVX2, vminu_vx_w, OP_UUU_W, H4, H4, DO_MIN)
RVVCALL(OPIVX2, vminu_vx_d, OP_UUU_D, H8, H8, DO_MIN)
RVVCALL(OPIVX2, vmin_vx_b, OP_SSS_B, H1, H1, DO_MIN)
RVVCALL(OPIVX2, vmin_vx_h, OP_SSS_H, H2, H2, DO_MIN)
RVVCALL(OPIVX2, vmin_vx_w, OP_SSS_W, H4, H4, DO_MIN)
RVVCALL(OPIVX2, vmin_vx_d, OP_SSS_D, H8, H8, DO_MIN)
RVVCALL(OPIVX2, vmaxu_vx_b, OP_UUU_B, H1, H1, DO_MAX)
RVVCALL(OPIVX2, vmaxu_vx_h, OP_UUU_H, H2, H2, DO_MAX)
RVVCALL(OPIVX2, vmaxu_vx_w, OP_UUU_W, H4, H4, DO_MAX)
RVVCALL(OPIVX2, vmaxu_vx_d, OP_UUU_D, H8, H8, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX)
RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX)
GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmin_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmaxu_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq)

/* Vector Single-Width Integer Multiply Instructions */
#define DO_MUL(N, M) (N * M)
RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL)
RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL)
GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq)

static int8_t do_mulh_b(int8_t s2, int8_t s1)
{
    return (int16_t)s2 * (int16_t)s1 >> 8;
}

static int16_t do_mulh_h(int16_t s2, int16_t s1)
{
    return (int32_t)s2 * (int32_t)s1 >> 16;
}

static int32_t do_mulh_w(int32_t s2, int32_t s1)
{
    return (int64_t)s2 * (int64_t)s1 >> 32;
}

static int64_t do_mulh_d(int64_t s2, int64_t s1)
{
    uint64_t hi_64, lo_64;

    muls64(&lo_64, &hi_64, s1, s2);
    return hi_64;
}

static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
{
    return (uint16_t)s2 * (uint16_t)s1 >> 8;
}

static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
{
    return (uint32_t)s2 * (uint32_t)s1 >> 16;
}

static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
{
    return (uint64_t)s2 * (uint64_t)s1 >> 32;
}

static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
{
    uint64_t hi_64, lo_64;

    mulu64(&lo_64, &hi_64, s2, s1);
    return hi_64;
}

static int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
{
    return (int16_t)s2 * (uint16_t)s1 >> 8;
}

static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
{
    return (int32_t)s2 * (uint32_t)s1 >> 16;
}

static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
{
    return (int64_t)s2 * (uint64_t)s1 >> 32;
}

/*
 * Let  A = signed operand,
 *      B = unsigned operand,
 *      P = mulu64(A, B), the unsigned 128-bit product.
 *
 * Let  X = 2 ** 64 - A, the 2's complement of A,
 *      SP = the signed product.
 * Then, if A < 0:
 *      SP = -X * B
 *         = -(2 ** 64 - A) * B
 *         = A * B - 2 ** 64 * B
 *         = P - 2 ** 64 * B
 *
 * The low 64 bits are unaffected, so only the high half of the
 * unsigned product needs the correction:
 *
 *      HI_P -= (A < 0 ? B : 0)
 */
static int64_t do_mulhsu_d(int64_t s2, uint64_t s1)
{
    uint64_t hi_64, lo_64;

    mulu64(&lo_64, &hi_64, s2, s1);
    hi_64 -= s2 < 0 ? s1 : 0;
    return hi_64;
}
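
/*
 * A quick numeric check of the HI_P adjustment: A = -1, B = 2.
 * mulu64() sees A as 2 ** 64 - 1, so P = 2 ** 65 - 2, i.e. hi = 1 and
 * lo = 0xFFFF...FE.  Subtracting B from hi gives 0xFFFF...FF, the
 * correct high half of the signed product A * B = -2.
 */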

RVVCALL(OPIVV2, vmulh_vv_b, OP_SSS_B, H1, H1, H1, do_mulh_b)
RVVCALL(OPIVV2, vmulh_vv_h, OP_SSS_H, H2, H2, H2, do_mulh_h)
RVVCALL(OPIVV2, vmulh_vv_w, OP_SSS_W, H4, H4, H4, do_mulh_w)
RVVCALL(OPIVV2, vmulh_vv_d, OP_SSS_D, H8, H8, H8, do_mulh_d)
RVVCALL(OPIVV2, vmulhu_vv_b, OP_UUU_B, H1, H1, H1, do_mulhu_b)
RVVCALL(OPIVV2, vmulhu_vv_h, OP_UUU_H, H2, H2, H2, do_mulhu_h)
RVVCALL(OPIVV2, vmulhu_vv_w, OP_UUU_W, H4, H4, H4, do_mulhu_w)
RVVCALL(OPIVV2, vmulhu_vv_d, OP_UUU_D, H8, H8, H8, do_mulhu_d)
RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_w, OP_SSS_W, H4, H4, DO_MUL)
RVVCALL(OPIVX2, vmul_vx_d, OP_SSS_D, H8, H8, DO_MUL)
RVVCALL(OPIVX2, vmulh_vx_b, OP_SSS_B, H1, H1, do_mulh_b)
RVVCALL(OPIVX2, vmulh_vx_h, OP_SSS_H, H2, H2, do_mulh_h)
RVVCALL(OPIVX2, vmulh_vx_w, OP_SSS_W, H4, H4, do_mulh_w)
RVVCALL(OPIVX2, vmulh_vx_d, OP_SSS_D, H8, H8, do_mulh_d)
RVVCALL(OPIVX2, vmulhu_vx_b, OP_UUU_B, H1, H1, do_mulhu_b)
RVVCALL(OPIVX2, vmulhu_vx_h, OP_UUU_H, H2, H2, do_mulhu_h)
RVVCALL(OPIVX2, vmulhu_vx_w, OP_UUU_W, H4, H4, do_mulhu_w)
RVVCALL(OPIVX2, vmulhu_vx_d, OP_UUU_D, H8, H8, do_mulhu_d)
RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq)

/* Vector Integer Divide Instructions */
#define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? N : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? (__typeof(N))(-1) :\
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? N :\
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
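
/*
 * These fixups mirror the scalar RISC-V rules: x / 0 yields all ones,
 * x % 0 yields x, and the one signed-overflow case INT_MIN / -1 yields
 * INT_MIN with remainder 0.  "N == -N" holds only for 0 and INT_MIN
 * under two's-complement wraparound, and 0 / -1 takes the ordinary
 * path, so the cheap test suffices at the 64-bit width where the
 * division would otherwise be undefined behaviour.
 */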

RVVCALL(OPIVV2, vdivu_vv_b, OP_UUU_B, H1, H1, H1, DO_DIVU)
RVVCALL(OPIVV2, vdivu_vv_h, OP_UUU_H, H2, H2, H2, DO_DIVU)
RVVCALL(OPIVV2, vdivu_vv_w, OP_UUU_W, H4, H4, H4, DO_DIVU)
RVVCALL(OPIVV2, vdivu_vv_d, OP_UUU_D, H8, H8, H8, DO_DIVU)
RVVCALL(OPIVV2, vdiv_vv_b, OP_SSS_B, H1, H1, H1, DO_DIV)
RVVCALL(OPIVV2, vdiv_vv_h, OP_SSS_H, H2, H2, H2, DO_DIV)
RVVCALL(OPIVV2, vdiv_vv_w, OP_SSS_W, H4, H4, H4, DO_DIV)
RVVCALL(OPIVV2, vdiv_vv_d, OP_SSS_D, H8, H8, H8, DO_DIV)
RVVCALL(OPIVV2, vremu_vv_b, OP_UUU_B, H1, H1, H1, DO_REMU)
RVVCALL(OPIVV2, vremu_vv_h, OP_UUU_H, H2, H2, H2, DO_REMU)
RVVCALL(OPIVV2, vremu_vv_w, OP_UUU_W, H4, H4, H4, DO_REMU)
RVVCALL(OPIVV2, vremu_vv_d, OP_UUU_D, H8, H8, H8, DO_REMU)
RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM)
RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM)
RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM)
RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM)
GEN_VEXT_VV(vdivu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vdivu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vdivu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vdivu_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vdiv_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vdiv_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vdiv_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vdiv_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vremu_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vremu_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vremu_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vremu_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vrem_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vrem_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vrem_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU)
RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU)
RVVCALL(OPIVX2, vdivu_vx_w, OP_UUU_W, H4, H4, DO_DIVU)
RVVCALL(OPIVX2, vdivu_vx_d, OP_UUU_D, H8, H8, DO_DIVU)
RVVCALL(OPIVX2, vdiv_vx_b, OP_SSS_B, H1, H1, DO_DIV)
RVVCALL(OPIVX2, vdiv_vx_h, OP_SSS_H, H2, H2, DO_DIV)
RVVCALL(OPIVX2, vdiv_vx_w, OP_SSS_W, H4, H4, DO_DIV)
RVVCALL(OPIVX2, vdiv_vx_d, OP_SSS_D, H8, H8, DO_DIV)
RVVCALL(OPIVX2, vremu_vx_b, OP_UUU_B, H1, H1, DO_REMU)
RVVCALL(OPIVX2, vremu_vx_h, OP_UUU_H, H2, H2, DO_REMU)
RVVCALL(OPIVX2, vremu_vx_w, OP_UUU_W, H4, H4, DO_REMU)
RVVCALL(OPIVX2, vremu_vx_d, OP_UUU_D, H8, H8, DO_REMU)
RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM)
RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM)
RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM)
RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM)
GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq)

/* Vector Widening Integer Multiply Instructions */
RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vwmul_vv_h, WOP_SSS_H, H4, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vwmul_vv_w, WOP_SSS_W, H8, H4, H4, DO_MUL)
RVVCALL(OPIVV2, vwmulu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vwmulu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL)
RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL)
RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL)
RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL)
GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq)

RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL)
RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL)
RVVCALL(OPIVX2, vwmul_vx_w, WOP_SSS_W, H8, H4, DO_MUL)
RVVCALL(OPIVX2, vwmulu_vx_b, WOP_UUU_B, H2, H1, DO_MUL)
RVVCALL(OPIVX2, vwmulu_vx_h, WOP_UUU_H, H4, H2, DO_MUL)
RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL)
RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL)
RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL)
RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL)
GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq)

/* Vector Single-Width Integer Multiply-Add Instructions */
#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)   \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)   \
{                                                              \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                            \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                            \
    TD d = *((TD *)vd + HD(i));                                \
    *((TD *)vd + HD(i)) = OP(s2, s1, d);                       \
}

#define DO_MACC(N, M, D) (M * N + D)
#define DO_NMSAC(N, M, D) (-(M * N) + D)
#define DO_MADD(N, M, D) (M * D + N)
#define DO_NMSUB(N, M, D) (-(M * D) + N)
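
/*
 * Operand order: OP(s2, s1, d) binds N = vs2, M = vs1 (or rs1),
 * D = vd.  DO_MACC therefore computes vs1 * vs2 + vd (accumulate into
 * vd), while DO_MADD computes vs1 * vd + vs2 (vd supplies a product
 * term), exactly the vmacc/vmadd distinction in the spec.
 */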
RVVCALL(OPIVV3, vmacc_vv_b, OP_SSS_B, H1, H1, H1, DO_MACC)
RVVCALL(OPIVV3, vmacc_vv_h, OP_SSS_H, H2, H2, H2, DO_MACC)
RVVCALL(OPIVV3, vmacc_vv_w, OP_SSS_W, H4, H4, H4, DO_MACC)
RVVCALL(OPIVV3, vmacc_vv_d, OP_SSS_D, H8, H8, H8, DO_MACC)
RVVCALL(OPIVV3, vnmsac_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSAC)
RVVCALL(OPIVV3, vnmsac_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSAC)
RVVCALL(OPIVV3, vnmsac_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSAC)
RVVCALL(OPIVV3, vnmsac_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSAC)
RVVCALL(OPIVV3, vmadd_vv_b, OP_SSS_B, H1, H1, H1, DO_MADD)
RVVCALL(OPIVV3, vmadd_vv_h, OP_SSS_H, H2, H2, H2, DO_MADD)
RVVCALL(OPIVV3, vmadd_vv_w, OP_SSS_W, H4, H4, H4, DO_MADD)
RVVCALL(OPIVV3, vmadd_vv_d, OP_SSS_D, H8, H8, H8, DO_MADD)
RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB)
RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB)
RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB)
RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB)
GEN_VEXT_VV(vmacc_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmacc_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmacc_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmacc_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vnmsac_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vnmsac_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vnmsac_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vnmsac_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vmadd_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vmadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vmadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vmadd_vv_d, 8, 8, clearq)
GEN_VEXT_VV(vnmsub_vv_b, 1, 1, clearb)
GEN_VEXT_VV(vnmsub_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vnmsub_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vnmsub_vv_d, 8, 8, clearq)

#define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    TD d = *((TD *)vd + HD(i));                                     \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d);                   \
}

RVVCALL(OPIVX3, vmacc_vx_b, OP_SSS_B, H1, H1, DO_MACC)
RVVCALL(OPIVX3, vmacc_vx_h, OP_SSS_H, H2, H2, DO_MACC)
RVVCALL(OPIVX3, vmacc_vx_w, OP_SSS_W, H4, H4, DO_MACC)
RVVCALL(OPIVX3, vmacc_vx_d, OP_SSS_D, H8, H8, DO_MACC)
RVVCALL(OPIVX3, vnmsac_vx_b, OP_SSS_B, H1, H1, DO_NMSAC)
RVVCALL(OPIVX3, vnmsac_vx_h, OP_SSS_H, H2, H2, DO_NMSAC)
RVVCALL(OPIVX3, vnmsac_vx_w, OP_SSS_W, H4, H4, DO_NMSAC)
RVVCALL(OPIVX3, vnmsac_vx_d, OP_SSS_D, H8, H8, DO_NMSAC)
RVVCALL(OPIVX3, vmadd_vx_b, OP_SSS_B, H1, H1, DO_MADD)
RVVCALL(OPIVX3, vmadd_vx_h, OP_SSS_H, H2, H2, DO_MADD)
RVVCALL(OPIVX3, vmadd_vx_w, OP_SSS_W, H4, H4, DO_MADD)
RVVCALL(OPIVX3, vmadd_vx_d, OP_SSS_D, H8, H8, DO_MADD)
RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB)
RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB)
RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB)
RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB)
GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vnmsub_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq)

/* Vector Widening Integer Multiply-Add Instructions */
RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC)
RVVCALL(OPIVV3, vwmaccu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MACC)
RVVCALL(OPIVV3, vwmaccu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MACC)
RVVCALL(OPIVV3, vwmacc_vv_b, WOP_SSS_B, H2, H1, H1, DO_MACC)
RVVCALL(OPIVV3, vwmacc_vv_h, WOP_SSS_H, H4, H2, H2, DO_MACC)
RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC)
RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC)
RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC)
RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC)
GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq)
GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh)
GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl)
GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq)

RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC)
RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC)
RVVCALL(OPIVX3, vwmaccu_vx_w, WOP_UUU_W, H8, H4, DO_MACC)
RVVCALL(OPIVX3, vwmacc_vx_b, WOP_SSS_B, H2, H1, DO_MACC)
RVVCALL(OPIVX3, vwmacc_vx_h, WOP_SSS_H, H4, H2, DO_MACC)
RVVCALL(OPIVX3, vwmacc_vx_w, WOP_SSS_W, H8, H4, DO_MACC)
RVVCALL(OPIVX3, vwmaccsu_vx_b, WOP_SSU_B, H2, H1, DO_MACC)
RVVCALL(OPIVX3, vwmaccsu_vx_h, WOP_SSU_H, H4, H2, DO_MACC)
RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC)
RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC)
RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC)
RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC)
GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmacc_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmacc_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8, clearq)
GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh)
GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl)
GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq)

/* Vector Integer Merge and Move Instructions */
#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN)                    \
void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env,           \
                  uint32_t desc)                                     \
{                                                                    \
    uint32_t vl = env->vl;                                           \
    uint32_t esz = sizeof(ETYPE);                                    \
    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
    uint32_t i;                                                      \
                                                                     \
    for (i = 0; i < vl; i++) {                                       \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                           \
        *((ETYPE *)vd + H(i)) = s1;                                  \
    }                                                                \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
}

GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1, clearb)
GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2, clearh)
GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4, clearl)
GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8, clearq)

#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN)                    \
void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env,         \
                  uint32_t desc)                                     \
{                                                                    \
    uint32_t vl = env->vl;                                           \
    uint32_t esz = sizeof(ETYPE);                                    \
    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
    uint32_t i;                                                      \
                                                                     \
    for (i = 0; i < vl; i++) {                                       \
        *((ETYPE *)vd + H(i)) = (ETYPE)s1;                           \
    }                                                                \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
}

GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1, clearb)
GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2, clearh)
GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4, clearl)
GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq)

#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H, CLEAR_FN)                 \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,          \
                  CPURISCVState *env, uint32_t desc)                 \
{                                                                    \
    uint32_t mlen = vext_mlen(desc);                                 \
    uint32_t vl = env->vl;                                           \
    uint32_t esz = sizeof(ETYPE);                                    \
    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
    uint32_t i;                                                      \
                                                                     \
    for (i = 0; i < vl; i++) {                                       \
        ETYPE *vt = (!vext_elem_mask(v0, mlen, i) ? vs2 : vs1);      \
        *((ETYPE *)vd + H(i)) = *(vt + H(i));                        \
    }                                                                \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
}

GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1, clearb)
GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2, clearh)
GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4, clearl)
GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq)

#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN)                 \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
                  void *vs2, CPURISCVState *env, uint32_t desc)      \
{                                                                    \
    uint32_t mlen = vext_mlen(desc);                                 \
    uint32_t vl = env->vl;                                           \
    uint32_t esz = sizeof(ETYPE);                                    \
    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
    uint32_t i;                                                      \
                                                                     \
    for (i = 0; i < vl; i++) {                                       \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                           \
        ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 :               \
                   (ETYPE)(target_long)s1);                          \
        *((ETYPE *)vd + H(i)) = d;                                   \
    }                                                                \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
}

GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1, clearb)
GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2, clearh)
GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4, clearl)
GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8, clearq)
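
/*
 * Unlike the masked-arithmetic loops above, vmerge consumes v0 as a
 * data selector rather than an execution mask: every element in
 * [0, vl) is written, taking vs2[i] where the mask bit is 0 and
 * vs1[i] (or the scalar) where it is 1.
 */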

/*
 *** Vector Fixed-Point Arithmetic Instructions
 */

/* Vector Single-Width Saturating Add and Subtract */

/*
 * As fixed-point instructions usually have a rounding mode and
 * saturation, define common macros for fixed point here.
 */
typedef void opivv2_rm_fn(void *vd, void *vs1, void *vs2, int i,
                          CPURISCVState *env, int vxrm);

#define OPIVV2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)     \
static inline void                                                  \
do_##NAME(void *vd, void *vs1, void *vs2, int i,                    \
          CPURISCVState *env, int vxrm)                             \
{                                                                   \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                                 \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1);                    \
}

static inline void
vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
             CPURISCVState *env,
             uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
             opivv2_rm_fn *fn)
{
    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        fn(vd, vs1, vs2, i, env, vxrm);
    }
}

static inline void
vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
             CPURISCVState *env,
             uint32_t desc, uint32_t esz, uint32_t dsz,
             opivv2_rm_fn *fn, clear_fn *clearfn)
{
    uint32_t vlmax = vext_maxsz(desc) / esz;
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;

    switch (env->vxrm) {
    case 0: /* rnu */
        vext_vv_rm_1(vd, v0, vs1, vs2,
                     env, vl, vm, mlen, 0, fn);
        break;
    case 1: /* rne */
        vext_vv_rm_1(vd, v0, vs1, vs2,
                     env, vl, vm, mlen, 1, fn);
        break;
    case 2: /* rdn */
        vext_vv_rm_1(vd, v0, vs1, vs2,
                     env, vl, vm, mlen, 2, fn);
        break;
    default: /* rod */
        vext_vv_rm_1(vd, v0, vs1, vs2,
                     env, vl, vm, mlen, 3, fn);
        break;
    }

    clearfn(vd, vl, vl * dsz, vlmax * dsz);
}
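
/*
 * The switch above looks redundant, but each arm passes vxrm as a
 * literal constant: once vext_vv_rm_1() and fn() are inlined,
 * get_round() sees a compile-time rounding mode and its branches can
 * fold away instead of being re-evaluated vl times per helper call.
 */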

/* generate helpers for fixed point instructions with OPIVV format */
#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN)                \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,     \
                  CPURISCVState *env, uint32_t desc)            \
{                                                               \
    vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ,         \
                 do_##NAME, CLEAR_FN);                          \
}

static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
{
    uint8_t res = a + b;

    if (res < a) {
        res = UINT8_MAX;
        env->vxsat = 0x1;
    }
    return res;
}

static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
                               uint16_t b)
{
    uint16_t res = a + b;

    if (res < a) {
        res = UINT16_MAX;
        env->vxsat = 0x1;
    }
    return res;
}

static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
                               uint32_t b)
{
    uint32_t res = a + b;

    if (res < a) {
        res = UINT32_MAX;
        env->vxsat = 0x1;
    }
    return res;
}

static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a,
                               uint64_t b)
{
    uint64_t res = a + b;

    if (res < a) {
        res = UINT64_MAX;
        env->vxsat = 0x1;
    }
    return res;
}

RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8)
RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16)
RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32)
RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64)
GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq)

typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i,
                          CPURISCVState *env, int vxrm);

#define OPIVX2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)          \
static inline void                                                  \
do_##NAME(void *vd, target_long s1, void *vs2, int i,               \
          CPURISCVState *env, int vxrm)                             \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1);           \
}

static inline void
vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
             CPURISCVState *env,
             uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
             opivx2_rm_fn *fn)
{
    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        fn(vd, s1, vs2, i, env, vxrm);
    }
}

static inline void
vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
             CPURISCVState *env,
             uint32_t desc, uint32_t esz, uint32_t dsz,
             opivx2_rm_fn *fn, clear_fn *clearfn)
{
    uint32_t vlmax = vext_maxsz(desc) / esz;
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;

    switch (env->vxrm) {
    case 0: /* rnu */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 0, fn);
        break;
    case 1: /* rne */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 1, fn);
        break;
    case 2: /* rdn */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 2, fn);
        break;
    default: /* rod */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 3, fn);
        break;
    }

    clearfn(vd, vl, vl * dsz, vlmax * dsz);
}

/* generate helpers for fixed point instructions with OPIVX format */
#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ, CLEAR_FN)          \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
        void *vs2, CPURISCVState *env, uint32_t desc)     \
{                                                         \
    vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ,    \
                 do_##NAME, CLEAR_FN);                    \
}

RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8)
RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16)
RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32)
RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64)
GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq)

static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
    int8_t res = a + b;

    if ((res ^ a) & (res ^ b) & INT8_MIN) {
        res = a > 0 ? INT8_MAX : INT8_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
    int16_t res = a + b;

    if ((res ^ a) & (res ^ b) & INT16_MIN) {
        res = a > 0 ? INT16_MAX : INT16_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    int32_t res = a + b;

    if ((res ^ a) & (res ^ b) & INT32_MIN) {
        res = a > 0 ? INT32_MAX : INT32_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    int64_t res = a + b;

    if ((res ^ a) & (res ^ b) & INT64_MIN) {
        res = a > 0 ? INT64_MAX : INT64_MIN;
        env->vxsat = 0x1;
    }
    return res;
}
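
/*
 * Why (res ^ a) & (res ^ b) & INT8_MIN detects signed overflow: a sum
 * can only overflow when a and b share a sign and res disagrees with
 * both.  E.g. a = b = 100 (int8_t): res wraps to -56, res ^ a and
 * res ^ b both have bit 7 set, and the saturation path fires.
 */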

RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8)
RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16)
RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32)
RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64)
GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8)
RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16)
RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32)
RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64)
GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq)

static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
{
    uint8_t res = a - b;

    if (res > a) {
        res = 0;
        env->vxsat = 0x1;
    }
    return res;
}

static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
                               uint16_t b)
{
    uint16_t res = a - b;

    if (res > a) {
        res = 0;
        env->vxsat = 0x1;
    }
    return res;
}

static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
                               uint32_t b)
{
    uint32_t res = a - b;

    if (res > a) {
        res = 0;
        env->vxsat = 0x1;
    }
    return res;
}

static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a,
                               uint64_t b)
{
    uint64_t res = a - b;

    if (res > a) {
        res = 0;
        env->vxsat = 0x1;
    }
    return res;
}

RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8)
RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16)
RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32)
RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64)
GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8)
RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16)
RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32)
RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64)
GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq)

static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
    int8_t res = a - b;

    /*
     * Saturate toward MAX when a is non-negative (b must be negative
     * for the overflow test to fire): a >= 0 covers the a == 0,
     * b == INT8_MIN case, which overflows positively.
     */
    if ((res ^ a) & (a ^ b) & INT8_MIN) {
        res = a >= 0 ? INT8_MAX : INT8_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
    int16_t res = a - b;

    if ((res ^ a) & (a ^ b) & INT16_MIN) {
        res = a >= 0 ? INT16_MAX : INT16_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    int32_t res = a - b;

    if ((res ^ a) & (a ^ b) & INT32_MIN) {
        res = a >= 0 ? INT32_MAX : INT32_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    int64_t res = a - b;

    if ((res ^ a) & (a ^ b) & INT64_MIN) {
        res = a >= 0 ? INT64_MAX : INT64_MIN;
        env->vxsat = 0x1;
    }
    return res;
}

RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8)
RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16)
RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32)
RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64)
GEN_VEXT_VV_RM(vssub_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vssub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vssub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vssub_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8)
RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16)
RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32)
RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64)
GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq)

/* Vector Single-Width Averaging Add and Subtract */
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
{
    uint8_t d = extract64(v, shift, 1);
    uint8_t d1;
    uint64_t D1, D2;

    if (shift == 0 || shift > 64) {
        return 0;
    }

    d1 = extract64(v, shift - 1, 1);
    D1 = extract64(v, 0, shift);
    if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */
        return d1;
    } else if (vxrm == 1) { /* round-to-nearest-even */
        if (shift > 1) {
            D2 = extract64(v, 0, shift - 1);
            return d1 & ((D2 != 0) | d);
        } else {
            return d1 & d;
        }
    } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
        return !d & (D1 != 0);
    }

    return 0; /* round-down (truncate) */
}
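
/*
 * Worked example: v = 0b0110 (6), shift = 2, so the truncated result
 * is 1 and the discarded fraction is 0b10 (exactly one half):
 *   rnu (vxrm = 0): round = d1 = 1           -> 1 + 1 = 2
 *   rne (vxrm = 1): d1 & ((D2 != 0) | d) = 1 -> 2 (ties go to even)
 *   rdn (vxrm = 2): round = 0                -> 1
 *   rod (vxrm = 3): !d & (D1 != 0) = 0       -> 1 (already odd)
 */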

static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    int64_t res = (int64_t)a + b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    int64_t res = a + b;
    uint8_t round = get_round(vxrm, res, 1);
    int64_t over = (res ^ a) & (res ^ b) & INT64_MIN;

    /* With signed overflow, bit 64 is inverse of bit 63. */
    return ((res >> 1) ^ over) + round;
}
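
/*
 * aadd64 cannot widen to 128 bits the way aadd32 widens to 64, so it
 * reconstructs the true bit 64 instead: on signed overflow, bit 64 of
 * the exact sum is the complement of bit 63 of res, and xoring
 * (res >> 1) with `over` (a sign-bit mask) patches exactly that bit
 * before rounding.
 */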

RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq)

static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);
    int64_t over = (res ^ a) & (a ^ b) & INT64_MIN;

    /* With signed overflow, bit 64 is inverse of bit 63. */
    return ((res >> 1) ^ over) + round;
}

RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)