/*
 * Generic vector operation expansion
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg-gvec-desc.h"

#define MAX_UNROLL  4

#ifdef CONFIG_DEBUG_TCG
static const TCGOpcode vecop_list_empty[1] = { 0 };
#else
#define vecop_list_empty NULL
#endif
/* Verify vector size and alignment rules.  OFS should be the OR of all
   of the operand offsets so that we can check them all at once.  */
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
{
    uint32_t opr_align = oprsz >= 16 ? 15 : 7;
    uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 15 : 7;
    tcg_debug_assert(oprsz > 0);
    tcg_debug_assert(oprsz <= maxsz);
    tcg_debug_assert((oprsz & opr_align) == 0);
    tcg_debug_assert((maxsz & max_align) == 0);
    tcg_debug_assert((ofs & max_align) == 0);
}

/* Verify vector overlap rules for two operands.  */
static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
{
    tcg_debug_assert(d == a || d + s <= a || a + s <= d);
}

/* Verify vector overlap rules for three operands.  */
static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(a, b, s);
}

/* Verify vector overlap rules for four operands.  */
static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
                            uint32_t c, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(d, c, s);
    check_overlap_2(a, b, s);
    check_overlap_2(a, c, s);
    check_overlap_2(b, c, s);
}
/* Create a descriptor from components.  */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
{
    uint32_t desc = 0;

    assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS));
    assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS));
    assert(data == sextract32(data, 0, SIMD_DATA_BITS));

    oprsz = (oprsz / 8) - 1;
    maxsz = (maxsz / 8) - 1;
    desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
    desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
    desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);

    return desc;
}
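/* As an illustration of the encoding above: out-of-line helpers do not
 * decode this descriptor by hand, but use the accessors provided by
 * tcg-gvec-desc.h, e.g.
 *
 *     intptr_t oprsz = simd_oprsz(desc);   // operation size in bytes
 *     intptr_t maxsz = simd_maxsz(desc);   // full size of the vector
 *     int32_t data = simd_data(desc);      // free-form operation data
 *
 * so a helper looping over "i < oprsz" in element-size steps visits
 * exactly the bytes encoded by simd_desc().
 */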
/* Generate a call to a gvec-style helper with two vector operands.  */
void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_2 *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with two vector operands
   and one scalar operand.  */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
                         uint32_t oprsz, uint32_t maxsz, int32_t data,
                         gen_helper_gvec_2i *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, c, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands.  */
void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_3 *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands.  */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_4 *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with five vector operands.  */
void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t xofs, uint32_t oprsz,
                        uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();
    a4 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);
    tcg_gen_addi_ptr(a4, cpu_env, xofs);

    fn(a0, a1, a2, a3, a4, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
    tcg_temp_free_i32(desc);
}
/* Generate a call to a gvec-style helper with two vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_2_ptr *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}
/* Generate a call to a gvec-style helper with three vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
                        uint32_t maxsz, int32_t data,
                        gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}
/* Return true if we want to implement something of OPRSZ bytes
   in units of LNSZ.  This limits the expansion of inline code.  */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
    if (oprsz % lnsz == 0) {
        uint32_t lnct = oprsz / lnsz;
        return lnct >= 1 && lnct <= MAX_UNROLL;
    }
    return false;
}
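/* A worked example, for illustration: with MAX_UNROLL == 4 and an
 * 8-byte unit, check_size_impl(32, 8) gives lnct == 4 and is accepted,
 * while check_size_impl(80, 8) gives lnct == 10, which exceeds
 * MAX_UNROLL and is rejected, pushing such an expansion toward host
 * vectors or an out-of-line helper.
 */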
static void expand_clr(uint32_t dofs, uint32_t maxsz);
/* Duplicate C as per VECE.  */
uint64_t (dup_const)(unsigned vece, uint64_t c)
{
    switch (vece) {
    case MO_8:
        return 0x0101010101010101ull * (uint8_t)c;
    case MO_16:
        return 0x0001000100010001ull * (uint16_t)c;
    case MO_32:
        return 0x0000000100000001ull * (uint32_t)c;
    case MO_64:
        return c;
    default:
        g_assert_not_reached();
    }
}
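/* For example, dup_const(MO_8, 0xab) == 0xababababababababull and
 * dup_const(MO_16, 0x1234) == 0x1234123412341234ull: multiplying the
 * truncated element by the appropriate "ones" pattern replicates it
 * into every lane of the 64-bit result.
 */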
/* Duplicate IN into OUT as per VECE.  */
static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i32(out, in);
        tcg_gen_muli_i32(out, out, 0x01010101);
        break;
    case MO_16:
        tcg_gen_deposit_i32(out, in, in, 16, 16);
        break;
    case MO_32:
        tcg_gen_mov_i32(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0101010101010101ull);
        break;
    case MO_16:
        tcg_gen_ext16u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0001000100010001ull);
        break;
    case MO_32:
        tcg_gen_deposit_i64(out, in, in, 32, 32);
        break;
    case MO_64:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Select a supported vector type for implementing an operation on SIZE
 * bytes.  If OP is 0, assume that the real operation to be performed is
 * required by all backends.  Otherwise, make sure that OP can be performed
 * on elements of size VECE in the selected type.  Do not select V64 if
 * PREFER_I64 is true.  Return 0 if no vector type is selected.
 */
static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
                                  uint32_t size, bool prefer_i64)
{
    if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         * It is hard to imagine a case in which v256 is supported
         * but v128 is not, but check anyway.
         */
        if (tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece)
            && (size % 32 == 0
                || tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) {
            return TCG_TYPE_V256;
        }
    }
    if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
        && tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece)) {
        return TCG_TYPE_V128;
    }
    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
        && tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)) {
        return TCG_TYPE_V64;
    }
    return 0;
}
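/* For example (a sketch, assuming a host with both v256 and v128):
 * size == 64 passes check_size_impl(64, 32), so V256 is returned
 * provided the required ops exist at that type; the V128 disjunct
 * above covers the split expansion described in the comment.  A host
 * offering only v64 with prefer_i64 set yields 0, and the caller
 * falls back to integer or out-of-line expansion.
 */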
/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
 * Only one of IN_32 or IN_64 may be set;
 * IN_C is used if IN_32 and IN_64 are unset.
 */
static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
                   uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
                   uint64_t in_c)
{
    TCGType type;
    TCGv_i64 t_64;
    TCGv_i32 t_32, t_desc;
    TCGv_ptr t_ptr;
    uint32_t i;

    assert(vece <= (in_32 ? MO_32 : MO_64));
    assert(in_32 == NULL || in_64 == NULL);

    /* If we're storing 0, expand oprsz to maxsz.  */
    if (in_32 == NULL && in_64 == NULL) {
        in_c = dup_const(vece, in_c);
        if (in_c == 0) {
            oprsz = maxsz;
        }
    }

    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and no variable dup.
     */
    type = choose_vector_type(NULL, vece, oprsz,
                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                               && (in_64 == NULL || vece == MO_64)));
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);

        if (in_32) {
            tcg_gen_dup_i32_vec(vece, t_vec, in_32);
        } else if (in_64) {
            tcg_gen_dup_i64_vec(vece, t_vec, in_64);
        } else {
            switch (vece) {
            case MO_8:
                tcg_gen_dup8i_vec(t_vec, in_c);
                break;
            case MO_16:
                tcg_gen_dup16i_vec(t_vec, in_c);
                break;
            case MO_32:
                tcg_gen_dup32i_vec(t_vec, in_c);
                break;
            default:
                tcg_gen_dup64i_vec(t_vec, in_c);
                break;
            }
        }

        i = 0;
        switch (type) {
        case TCG_TYPE_V256:
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16.  The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            for (; i + 32 <= oprsz; i += 32) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
            }
            /* fallthru */
        case TCG_TYPE_V128:
            for (; i + 16 <= oprsz; i += 16) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
            }
            break;
        case TCG_TYPE_V64:
            for (; i < oprsz; i += 8) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
            }
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(t_vec);
        goto done;
    }

    /* Otherwise, inline with an integer type, unless "large".  */
    if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) {
        t_64 = NULL;
        t_32 = NULL;

        if (in_32) {
            /* We are given a 32-bit variable input.  For a 64-bit host,
               use a 64-bit operation unless the 32-bit operation would
               be simple enough.  */
            if (TCG_TARGET_REG_BITS == 64
                && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
                t_64 = tcg_temp_new_i64();
                tcg_gen_extu_i32_i64(t_64, in_32);
                gen_dup_i64(vece, t_64, t_64);
            } else {
                t_32 = tcg_temp_new_i32();
                gen_dup_i32(vece, t_32, in_32);
            }
        } else if (in_64) {
            /* We are given a 64-bit variable input.  */
            t_64 = tcg_temp_new_i64();
            gen_dup_i64(vece, t_64, in_64);
        } else {
            /* We are given a constant input.  */
            /* For 64-bit hosts, use 64-bit constants for "simple" constants
               or when we'd need too many 32-bit stores, or when a 64-bit
               constant is really required.  */
            if (vece == MO_64
                || (TCG_TARGET_REG_BITS == 64
                    && (in_c == 0 || in_c == -1
                        || !check_size_impl(oprsz, 4)))) {
                t_64 = tcg_const_i64(in_c);
            } else {
                t_32 = tcg_const_i32(in_c);
            }
        }

        /* Implement inline if we picked an implementation size above.  */
        if (t_32) {
            for (i = 0; i < oprsz; i += 4) {
                tcg_gen_st_i32(t_32, cpu_env, dofs + i);
            }
            tcg_temp_free_i32(t_32);
            goto done;
        }
        if (t_64) {
            for (i = 0; i < oprsz; i += 8) {
                tcg_gen_st_i64(t_64, cpu_env, dofs + i);
            }
            tcg_temp_free_i64(t_64);
            goto done;
        }
    }

    /* Otherwise implement out of line.  */
    t_ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);
    t_desc = tcg_const_i32(simd_desc(oprsz, maxsz, 0));

    if (vece == MO_64) {
        if (in_64) {
            gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
        } else {
            t_64 = tcg_const_i64(in_c);
            gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
            tcg_temp_free_i64(t_64);
        }
    } else {
        typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);
        static dup_fn * const fns[3] = {
            gen_helper_gvec_dup8,
            gen_helper_gvec_dup16,
            gen_helper_gvec_dup32
        };

        if (in_32) {
            fns[vece](t_ptr, t_desc, in_32);
        } else {
            t_32 = tcg_temp_new_i32();
            if (in_64) {
                tcg_gen_extrl_i64_i32(t_32, in_64);
            } else if (vece == MO_8) {
                tcg_gen_movi_i32(t_32, in_c & 0xff);
            } else if (vece == MO_16) {
                tcg_gen_movi_i32(t_32, in_c & 0xffff);
            } else {
                tcg_gen_movi_i32(t_32, in_c);
            }
            fns[vece](t_ptr, t_desc, t_32);
            tcg_temp_free_i32(t_32);
        }
    }

    tcg_temp_free_ptr(t_ptr);
    tcg_temp_free_i32(t_desc);
    return;

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Likewise, but with zero.  */
static void expand_clr(uint32_t dofs, uint32_t maxsz)
{
    do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
}
/* Expand OPRSZ bytes worth of two-operand operations using i32 elements.  */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         void (*fni)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        fni(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
}

static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i32 c, bool scalar_first,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_3_i32(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i32 elements.  */
static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t1, cpu_env, aofs + i);
        tcg_gen_ld_i32(t2, cpu_env, bofs + i);
        tcg_gen_ld_i32(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i32(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}
/* Expand OPRSZ bytes worth of two-operand operations using i64 elements.  */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         void (*fni)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        fni(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
}

static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i64 c, bool scalar_first,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i64 elements.  */
static void expand_3_i64(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i64 elements.  */
static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t1, cpu_env, aofs + i);
        tcg_gen_ld_i64(t2, cpu_env, bofs + i);
        tcg_gen_ld_i64(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}
/* Expand OPRSZ bytes worth of two-operand operations using host vectors.  */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t oprsz, uint32_t tysz, TCGType type,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        fni(vece, t0, t0);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
}

/* Expand OPRSZ bytes worth of two-vector operands and an immediate operand
   using host vectors.  */
static void expand_2i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, cpu_env, dofs + i);
        }
        fni(vece, t1, t0, c);
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          TCGv_vec c, bool scalar_first,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(vece, t1, c, t0);
        } else {
            fni(vece, t1, t0, c);
        }
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using host vectors.  */
static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

/*
 * Expand OPRSZ bytes worth of three-vector operands and an immediate operand
 * using host vectors.
 */
static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                          TCGType type, int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
                                      int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1, c);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using host vectors.  */
static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool write_aofs,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec,
                                     TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec t3 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t1, cpu_env, aofs + i);
        tcg_gen_ld_vec(t2, cpu_env, bofs + i);
        tcg_gen_ld_vec(t3, cpu_env, cofs + i);
        fni(vece, t0, t1, t2, t3);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_vec(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_vec(t3);
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}
/* Expand a vector two-operand operation.  */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2_i64(dofs, aofs, oprsz, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2_i32(dofs, aofs, oprsz, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
1057 void tcg_gen_gvec_2i(uint32_t dofs
, uint32_t aofs
, uint32_t oprsz
,
1058 uint32_t maxsz
, int64_t c
, const GVecGen2i
*g
)
1060 const TCGOpcode
*this_list
= g
->opt_opc
? : vecop_list_empty
;
1061 const TCGOpcode
*hold_list
= tcg_swap_vecop_list(this_list
);
1065 check_size_align(oprsz
, maxsz
, dofs
| aofs
);
1066 check_overlap_2(dofs
, aofs
, maxsz
);
1070 type
= choose_vector_type(g
->opt_opc
, g
->vece
, oprsz
, g
->prefer_i64
);
1074 /* Recall that ARM SVE allows vector sizes that are not a
1075 * power of 2, but always a multiple of 16. The intent is
1076 * that e.g. size == 80 would be expanded with 2x32 + 1x16.
1078 some
= QEMU_ALIGN_DOWN(oprsz
, 32);
1079 expand_2i_vec(g
->vece
, dofs
, aofs
, some
, 32, TCG_TYPE_V256
,
1080 c
, g
->load_dest
, g
->fniv
);
1081 if (some
== oprsz
) {
1090 expand_2i_vec(g
->vece
, dofs
, aofs
, oprsz
, 16, TCG_TYPE_V128
,
1091 c
, g
->load_dest
, g
->fniv
);
1094 expand_2i_vec(g
->vece
, dofs
, aofs
, oprsz
, 8, TCG_TYPE_V64
,
1095 c
, g
->load_dest
, g
->fniv
);
1099 if (g
->fni8
&& check_size_impl(oprsz
, 8)) {
1100 expand_2i_i64(dofs
, aofs
, oprsz
, c
, g
->load_dest
, g
->fni8
);
1101 } else if (g
->fni4
&& check_size_impl(oprsz
, 4)) {
1102 expand_2i_i32(dofs
, aofs
, oprsz
, c
, g
->load_dest
, g
->fni4
);
1105 tcg_gen_gvec_2_ool(dofs
, aofs
, oprsz
, maxsz
, c
, g
->fno
);
1107 TCGv_i64 tcg_c
= tcg_const_i64(c
);
1108 tcg_gen_gvec_2i_ool(dofs
, aofs
, tcg_c
, oprsz
,
1110 tcg_temp_free_i64(tcg_c
);
1117 g_assert_not_reached();
1119 tcg_swap_vecop_list(hold_list
);
1121 if (oprsz
< maxsz
) {
1122 expand_clr(dofs
+ oprsz
, maxsz
- oprsz
);
1126 /* Expand a vector operation with two vectors and a scalar. */
1127 void tcg_gen_gvec_2s(uint32_t dofs
, uint32_t aofs
, uint32_t oprsz
,
1128 uint32_t maxsz
, TCGv_i64 c
, const GVecGen2s
*g
)
1132 check_size_align(oprsz
, maxsz
, dofs
| aofs
);
1133 check_overlap_2(dofs
, aofs
, maxsz
);
1137 type
= choose_vector_type(g
->opt_opc
, g
->vece
, oprsz
, g
->prefer_i64
);
1140 const TCGOpcode
*this_list
= g
->opt_opc
? : vecop_list_empty
;
1141 const TCGOpcode
*hold_list
= tcg_swap_vecop_list(this_list
);
1142 TCGv_vec t_vec
= tcg_temp_new_vec(type
);
1145 tcg_gen_dup_i64_vec(g
->vece
, t_vec
, c
);
1149 /* Recall that ARM SVE allows vector sizes that are not a
1150 * power of 2, but always a multiple of 16. The intent is
1151 * that e.g. size == 80 would be expanded with 2x32 + 1x16.
1153 some
= QEMU_ALIGN_DOWN(oprsz
, 32);
1154 expand_2s_vec(g
->vece
, dofs
, aofs
, some
, 32, TCG_TYPE_V256
,
1155 t_vec
, g
->scalar_first
, g
->fniv
);
1156 if (some
== oprsz
) {
1166 expand_2s_vec(g
->vece
, dofs
, aofs
, oprsz
, 16, TCG_TYPE_V128
,
1167 t_vec
, g
->scalar_first
, g
->fniv
);
1171 expand_2s_vec(g
->vece
, dofs
, aofs
, oprsz
, 8, TCG_TYPE_V64
,
1172 t_vec
, g
->scalar_first
, g
->fniv
);
1176 g_assert_not_reached();
1178 tcg_temp_free_vec(t_vec
);
1179 tcg_swap_vecop_list(hold_list
);
1180 } else if (g
->fni8
&& check_size_impl(oprsz
, 8)) {
1181 TCGv_i64 t64
= tcg_temp_new_i64();
1183 gen_dup_i64(g
->vece
, t64
, c
);
1184 expand_2s_i64(dofs
, aofs
, oprsz
, t64
, g
->scalar_first
, g
->fni8
);
1185 tcg_temp_free_i64(t64
);
1186 } else if (g
->fni4
&& check_size_impl(oprsz
, 4)) {
1187 TCGv_i32 t32
= tcg_temp_new_i32();
1189 tcg_gen_extrl_i64_i32(t32
, c
);
1190 gen_dup_i32(g
->vece
, t32
, t32
);
1191 expand_2s_i32(dofs
, aofs
, oprsz
, t32
, g
->scalar_first
, g
->fni4
);
1192 tcg_temp_free_i32(t32
);
1194 tcg_gen_gvec_2i_ool(dofs
, aofs
, c
, oprsz
, maxsz
, 0, g
->fno
);
1198 if (oprsz
< maxsz
) {
1199 expand_clr(dofs
+ oprsz
, maxsz
- oprsz
);
/* Expand a vector three-operand operation.  */
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
                               maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector operation with three vectors and an immediate.  */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                     uint32_t oprsz, uint32_t maxsz, int64_t c,
                     const GVecGen3i *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3i_i64(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3i_i32(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector four-operand operation.  */
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
                     32, TCG_TYPE_V256, g->write_aofs, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        cofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     16, TCG_TYPE_V128, g->write_aofs, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     8, TCG_TYPE_V64, g->write_aofs, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_4_i64(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_4_i32(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
                               oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/*
 * Expand specific vector operations.
 */

static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mov_vec(a, b);
}

void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_mov_i64,
        .fniv = vec_mov2,
        .fno = gen_helper_gvec_mov,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (dofs != aofs) {
        tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
    } else {
        check_size_align(oprsz, maxsz, dofs);
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    }
}
void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i32 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_32);
    do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
}

void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i64 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_64);
    do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
}
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t maxsz)
{
    if (vece <= MO_32) {
        TCGv_i32 in = tcg_temp_new_i32();
        switch (vece) {
        case MO_8:
            tcg_gen_ld8u_i32(in, cpu_env, aofs);
            break;
        case MO_16:
            tcg_gen_ld16u_i32(in, cpu_env, aofs);
            break;
        case MO_32:
            tcg_gen_ld_i32(in, cpu_env, aofs);
            break;
        }
        tcg_gen_gvec_dup_i32(vece, dofs, oprsz, maxsz, in);
        tcg_temp_free_i32(in);
    } else if (vece == MO_64) {
        TCGv_i64 in = tcg_temp_new_i64();
        tcg_gen_ld_i64(in, cpu_env, aofs);
        tcg_gen_gvec_dup_i64(MO_64, dofs, oprsz, maxsz, in);
        tcg_temp_free_i64(in);
    } else {
        /* 128-bit duplicate.  */
        /* ??? Dup to 256-bit vector.  */
        int i;

        tcg_debug_assert(vece == 4);
        tcg_debug_assert(oprsz >= 16);
        if (TCG_TARGET_HAS_v128) {
            TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);

            tcg_gen_ld_vec(in, cpu_env, aofs);
            for (i = 0; i < oprsz; i += 16) {
                tcg_gen_st_vec(in, cpu_env, dofs + i);
            }
            tcg_temp_free_vec(in);
        } else {
            TCGv_i64 in0 = tcg_temp_new_i64();
            TCGv_i64 in1 = tcg_temp_new_i64();

            tcg_gen_ld_i64(in0, cpu_env, aofs);
            tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
            for (i = 0; i < oprsz; i += 16) {
                tcg_gen_st_i64(in0, cpu_env, dofs + i);
                tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
            }
            tcg_temp_free_i64(in0);
            tcg_temp_free_i64(in1);
        }
    }
}
void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint64_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_64, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint32_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_32, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint16_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_16, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t oprsz,
                        uint32_t maxsz, uint8_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_8, dofs, oprsz, maxsz, NULL, NULL, x);
}
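/* Usage sketch (hypothetical env offsets, for illustration only):
 * clearing one 16-byte register and filling another with a replicated
 * 32-bit constant might look like
 *
 *     tcg_gen_gvec_dup8i(dofs, 16, 16, 0);
 *     tcg_gen_gvec_dup32i(dofs2, 16, 16, 0xdeadbeef);
 *
 * where each call reduces to a single do_dup() expansion.
 */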
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_not_i64,
        .fniv = tcg_gen_not_vec,
        .fno = gen_helper_gvec_not,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
}
/* Perform a vector addition using normal addition and a mask.  The mask
   should be the sign bit of each lane.  This 6-operation form is more
   efficient than separate additions when there are 4 or more lanes in
   the 64-bit operation.  */
static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_xor_i64(t3, a, b);
    tcg_gen_add_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
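/* Why this works, in brief: clearing the sign bit of every lane in both
 * inputs (the two andc's) guarantees the lane-wise sums cannot carry
 * across a lane boundary, and the true sign bits are then restored by
 * the final xor, since the sign bit of a sum equals the xor of the
 * inputs' sign bits with the carry already present in d.
 */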
void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}
void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, a, ~0xffffffffull);
    tcg_gen_add_i64(t2, a, b);
    tcg_gen_add_i64(t1, t1, b);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
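/* Here the full-width add into t2 provides the correct low 32-bit lane
 * (any carry out of bit 31 lands in its high half, which deposit
 * discards), while t1 recomputes the high lane from a's high half
 * alone, so that no low-lane carry can leak into it.
 */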
static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };

void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add8,
          .opt_opc = vecop_list_add,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add16,
          .opt_opc = vecop_list_add,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add32,
          .opt_opc = vecop_list_add,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add64,
          .opt_opc = vecop_list_add,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
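/* As a usage sketch (offsets hypothetical): a front end wanting
 * element-wise d = a + b over a 16-byte slice of 32-bit lanes emits
 *
 *     tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 16, 16);
 *
 * and the table above selects host vectors, inline i32 code, or the
 * out-of-line helper as the backend allows.
 */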
void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds8,
          .opt_opc = vecop_list_add,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds16,
          .opt_opc = vecop_list_add,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds32,
          .opt_opc = vecop_list_add,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds64,
          .opt_opc = vecop_list_add,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}
static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };

void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs8,
          .opt_opc = vecop_list_sub,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs16,
          .opt_opc = vecop_list_sub,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs32,
          .opt_opc = vecop_list_sub,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs64,
          .opt_opc = vecop_list_sub,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}
/* Perform a vector subtraction using normal subtraction and a mask.
   Compare gen_addv_mask above.  */
static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_or_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_eqv_i64(t3, a, b);
    tcg_gen_sub_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
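/* The same carry-isolation idea as gen_addv_mask, inverted for
 * subtraction: biasing a's sign bits high (the or) while clearing b's
 * (the andc) ensures no borrow can propagate across a lane boundary,
 * and the eqv term recovers the true sign bits via the final xor.
 */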
void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_sub_i64(t2, a, b);
    tcg_gen_sub_i64(t1, a, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub8,
          .opt_opc = vecop_list_sub,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub16,
          .opt_opc = vecop_list_sub,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub32,
          .opt_opc = vecop_list_sub,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub64,
          .opt_opc = vecop_list_sub,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };

void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul8,
          .opt_opc = vecop_list_mul,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul16,
          .opt_opc = vecop_list_mul,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul32,
          .opt_opc = vecop_list_mul,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul64,
          .opt_opc = vecop_list_mul,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls8,
          .opt_opc = vecop_list_mul,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls16,
          .opt_opc = vecop_list_mul,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls32,
          .opt_opc = vecop_list_mul,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls64,
          .opt_opc = vecop_list_mul,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}
void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 max = tcg_const_i32(-1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i32(max);
}

static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 max = tcg_const_i64(-1);
    tcg_gen_add_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i64(max);
}
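/* The saturation test above is the classic unsigned-overflow idiom:
 * after d = a + b, overflow occurred iff d < a (unsigned), in which
 * case movcond replaces the wrapped sum with the all-ones maximum.
 */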
void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_usadd_i32,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_usadd_i64,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 min = tcg_const_i32(0);
    tcg_gen_sub_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i32(min);
}

static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 min = tcg_const_i64(0);
    tcg_gen_sub_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i64(min);
}
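/* Likewise for subtraction: a < b (unsigned) means d = a - b wrapped
 * around, and movcond replaces it with the minimum, zero.
 */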
void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_ussub_i32,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_ussub_i64,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smin_i32,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smin_i64,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umin_i32,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umin_i64,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smax_i32,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smax_i64,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umax_i32,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umax_i64,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
/* Perform a vector negation using normal negation and a mask.
   Compare gen_subv_mask above.  */
static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t3, m, b);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_sub_i64(d, m, t2);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_neg_i64(t2, b);
    tcg_gen_neg_i64(t1, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
    static const GVecGen2 g[4] = {
        { .fni8 = tcg_gen_vec_neg8_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_neg16_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_neg_i32,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_neg_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
}

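/*
 * Only the MO_64 entry sets .prefer_i64: on a 64-bit host the .fni8
 * expansion of a full-width element is already a single integer negation,
 * so it is preferred over going through a 64-bit host vector type.
 */
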
void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_and_i64,
        .fniv = tcg_gen_and_vec,
        .fno = gen_helper_gvec_and,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

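/*
 * As in tcg_gen_gvec_and above (x & x == x), the expanders below fold the
 * aofs == bofs case algebraically: x | x == x, x ^ x == 0, x & ~x == 0,
 * x | ~x == -1, ~(x & x) == ~(x | x) == ~x, and eqv(x, x) == -1.
 */
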
void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
                     uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_or_i64,
        .fniv = tcg_gen_or_vec,
        .fno = gen_helper_gvec_or,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_xor_i64,
        .fniv = tcg_gen_xor_vec,
        .fno = gen_helper_gvec_xor,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_andc_i64,
        .fniv = tcg_gen_andc_vec,
        .fno = gen_helper_gvec_andc,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_orc_i64,
        .fniv = tcg_gen_orc_vec,
        .fno = gen_helper_gvec_orc,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_nand_i64,
        .fniv = tcg_gen_nand_vec,
        .fno = gen_helper_gvec_nand,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_nor_i64,
        .fniv = tcg_gen_nor_vec,
        .fno = gen_helper_gvec_nor,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_eqv_i64,
        .fniv = tcg_gen_eqv_vec,
        .fno = gen_helper_gvec_eqv,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

static const GVecGen2s gop_ands = {
    .fni8 = tcg_gen_and_i64,
    .fniv = tcg_gen_and_vec,
    .fno = gen_helper_gvec_ands,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

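/*
 * For the immediate forms, dup_const replicates the low bits of the
 * constant across the 64-bit scalar; e.g. dup_const(MO_8, 0x55) is
 * 0x5555555555555555.  So, with a hypothetical env offset vreg_ofs(d),
 *
 *     tcg_gen_gvec_andi(MO_8, vreg_ofs(d), vreg_ofs(a), 0x55, 16, 16);
 *
 * ands every byte lane with 0x55.  The xori/ori variants below follow suit.
 */
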
static const GVecGen2s gop_xors = {
    .fni8 = tcg_gen_xor_i64,
    .fniv = tcg_gen_xor_vec,
    .fno = gen_helper_gvec_xors,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

static const GVecGen2s gop_ors = {
    .fni8 = tcg_gen_or_i64,
    .fniv = tcg_gen_or_vec,
    .fno = gen_helper_gvec_ors,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
                      TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
                      int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);   /* drop bits shifted in from the lane below */
}

void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

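/*
 * Example of the mask trick above: with c = 3 at MO_8, the whole 64-bit
 * value is shifted left by 3 and then masked with
 * dup_const(MO_8, 0xff << 3) = 0xf8f8f8f8f8f8f8f8 (dup_const truncates the
 * constant to the element width), clearing the three low bits that each
 * byte lane received from the lane below it.
 */
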
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shl8i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shl16i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shli_i32,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shli_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);   /* drop bits shifted in from the lane above */
}

void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shr8i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shr16i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shri_i32,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shri_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
    uint64_t c_mask = dup_const(MO_8, 0xff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);       /* isolate (shifted) sign bit */
    tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_andi_i64(d, d, c_mask);       /* clear out bits above sign */
    tcg_gen_or_i64(d, d, s);              /* include sign extension */
    tcg_temp_free_i64(s);
}

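/*
 * Worked example for the multiply-based sign replication: with c = 2 and a
 * byte lane a = 0x90, the logical shift and c_mask = 0x3f leave d = 0x24;
 * s isolates the shifted sign bit 0x20, and s * ((2 << 2) - 2) = 0x20 * 6
 * = 0xc0 fills bits 7:6.  The result 0x24 | 0xc0 = 0xe4 equals 0x90 >> 2
 * done arithmetically.  The product cannot cross a lane: at most one bit
 * is set per lane, and the factor spreads it no higher than the lane's
 * own sign position.
 */
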
void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
    uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);       /* isolate (shifted) sign bit */
    tcg_gen_andi_i64(d, d, c_mask);       /* clear out bits above sign */
    tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_or_i64(d, d, s);              /* include sign extension */
    tcg_temp_free_i64(s);
}

void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_sar8i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sar16i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sari_i32,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sari_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

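/*
 * Usage sketch with a hypothetical env offset vreg_ofs(d): shift every
 * 16-bit lane of a 16-byte vector right by 3, preserving the sign:
 *
 *     tcg_gen_gvec_sari(MO_16, vreg_ofs(d), vreg_ofs(a), 3, 16, 16);
 *
 * The immediate must satisfy 0 <= shift < 16 for MO_16; shift == 0
 * reduces to a plain tcg_gen_gvec_mov.
 */
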
/* Expand OPRSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i32(cond, t0, t0, t1);
        tcg_gen_neg_i32(t0, t0);    /* 0/1 -> 0/-1: all-ones when true */
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i64(cond, t0, t0, t1);
        tcg_gen_neg_i64(t0, t0);    /* 0/1 -> 0/-1: all-ones when true */
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGCond cond)
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        tcg_gen_cmp_vec(cond, vece, t0, t0, t1);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

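/*
 * Unlike the integral expansions above, no negation is needed here:
 * tcg_gen_cmp_vec already produces all-ones per-element results for true,
 * matching the 0/-1 mask convention.
 */
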
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
    static gen_helper_gvec_3 * const eq_fn[4] = {
        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
        gen_helper_gvec_eq32, gen_helper_gvec_eq64
    };
    static gen_helper_gvec_3 * const ne_fn[4] = {
        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
        gen_helper_gvec_ne32, gen_helper_gvec_ne64
    };
    static gen_helper_gvec_3 * const lt_fn[4] = {
        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
        gen_helper_gvec_lt32, gen_helper_gvec_lt64
    };
    static gen_helper_gvec_3 * const le_fn[4] = {
        gen_helper_gvec_le8, gen_helper_gvec_le16,
        gen_helper_gvec_le32, gen_helper_gvec_le64
    };
    static gen_helper_gvec_3 * const ltu_fn[4] = {
        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
    };
    static gen_helper_gvec_3 * const leu_fn[4] = {
        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
        gen_helper_gvec_leu32, gen_helper_gvec_leu64
    };
    static gen_helper_gvec_3 * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_NE] = ne_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };

    const TCGOpcode *hold_list;
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /*
     * Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    hold_list = tcg_swap_vecop_list(cmp_list);
    type = choose_vector_type(cmp_list, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
        break;
    case TCG_TYPE_V64:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
        break;

    case 0:
        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
        } else {
            gen_helper_gvec_3 * const *fn = fns[cond];

            if (fn == NULL) {
                uint32_t tmp;
                tmp = aofs, aofs = bofs, bofs = tmp;
                cond = tcg_swap_cond(cond);
                fn = fns[cond];
                assert(fn != NULL);
            }
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
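
/*
 * Only EQ/NE/LT/LE/LTU/LEU have out-of-line helper tables; when a
 * GT/GE/GTU/GEU comparison reaches the helper path, fns[cond] is NULL,
 * so the operands are swapped and the condition reversed via
 * tcg_swap_cond before dispatching.
 */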