/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 Linaro, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "tcg-internal.h"


/* Reduce the number of ifdefs below. This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate. */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

/*
 * Vector optional opcode tracking.
 * Except for the basic logical operations (and, or, xor) and
 * data movement (mov, ld, st, dupi), many vector opcodes are
 * optional and may not be supported on the host. Thank Intel
 * for the irregularity in their instruction set.
 *
 * The gvec expanders allow custom vector operations to be composed,
 * generally via the .fniv callback in the GVecGen* structures. At
 * the same time, in deciding whether to use this hook we need to
 * know if the host supports the required operations. This is
 * presented as an array of opcodes, terminated by 0. Each opcode
 * is assumed to be expanded with the given VECE.
 *
 * For debugging, we want to validate this array. Therefore, when
 * tcg_ctx->vecop_list is non-NULL, the tcg_gen_*_vec expanders
 * will validate that their opcode is present in the list.
 */
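
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a front end pairs such a list with a .fniv callback in a
 * GVecGen* descriptor. The callback and helper names below are
 * hypothetical; the descriptor shape follows tcg-op-gvec.h.
 *
 *     static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
 *
 *     static void gen_neg_example(unsigned vece, TCGv_vec r, TCGv_vec a)
 *     {
 *         tcg_gen_neg_vec(vece, r, a);
 *     }
 *
 *     static const GVecGen2 g = {
 *         .fniv = gen_neg_example,
 *         .fno = gen_helper_neg_example,    (hypothetical fallback helper)
 *         .opt_opc = vecop_list,
 *         .vece = MO_64,
 *     };
 *
 * The front end then calls tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g),
 * and tcg_can_emit_vecop_list() below decides whether .fniv is usable.
 */
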
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode op)
{
    const TCGOpcode *p = tcg_ctx->vecop_list;
    if (p) {
        for (; *p; ++p) {
            if (*p == op) {
                return;
            }
        }
        g_assert_not_reached();
    }
}
#endif

bool tcg_can_emit_vecop_list(const TCGOpcode *list,
                             TCGType type, unsigned vece)
{
    if (list == NULL) {
        return true;
    }

    for (; *list; ++list) {
        TCGOpcode opc = *list;

#ifdef CONFIG_DEBUG_TCG
        switch (opc) {
        case INDEX_op_and_vec:
        case INDEX_op_or_vec:
        case INDEX_op_xor_vec:
        case INDEX_op_mov_vec:
        case INDEX_op_dup_vec:
        case INDEX_op_dup2_vec:
        case INDEX_op_ld_vec:
        case INDEX_op_st_vec:
        case INDEX_op_bitsel_vec:
            /* These opcodes are mandatory and should not be listed. */
            g_assert_not_reached();
        case INDEX_op_not_vec:
            /* This opcode has a generic expansion using the above. */
            g_assert_not_reached();
        default:
            break;
        }
#endif

        if (tcg_can_emit_vec_op(opc, type, vece)) {
            continue;
        }

        /*
         * The opcode list is created by front ends based on what they
         * actually invoke. We must mirror the logic in the routines
         * below for generic expansions using other opcodes.
         */
        switch (opc) {
        case INDEX_op_neg_vec:
            if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)) {
                continue;
            }
            break;
        case INDEX_op_abs_vec:
            if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)
                && (tcg_can_emit_vec_op(INDEX_op_smax_vec, type, vece) > 0
                    || tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0
                    || tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece))) {
                continue;
            }
            break;
        case INDEX_op_usadd_vec:
            if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece) ||
                tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
                continue;
            }
            break;
        case INDEX_op_ussub_vec:
            if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece) ||
                tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
                continue;
            }
            break;
        case INDEX_op_cmpsel_vec:
        case INDEX_op_smin_vec:
        case INDEX_op_smax_vec:
        case INDEX_op_umin_vec:
        case INDEX_op_umax_vec:
            if (tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
                continue;
            }
            break;
        default:
            break;
        }
        return false;
    }
    return true;
}

void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
{
    TCGOp *op = tcg_emit_op(opc, 2);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
}

void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b)
{
    TCGOp *op = tcg_emit_op(opc, 3);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
}

void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b, TCGArg c)
{
    TCGOp *op = tcg_emit_op(opc, 4);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
    op->args[3] = c;
}

static void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
                      TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e)
{
    TCGOp *op = tcg_emit_op(opc, 6);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
    op->args[3] = c;
    op->args[4] = d;
    op->args[5] = e;
}

static void vec_gen_op2(TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output. */
    tcg_debug_assert(at->base_type >= type);
    vec_gen_2(opc, type, vece, temp_arg(rt), temp_arg(at));
}

static void vec_gen_op3(TCGOpcode opc, unsigned vece,
                        TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output. */
    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    vec_gen_3(opc, type, vece, temp_arg(rt), temp_arg(at), temp_arg(bt));
}

void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a)
{
    if (r != a) {
        vec_gen_op2(INDEX_op_mov_vec, 0, r, a);
    }
}

TCGv_vec tcg_const_zeros_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    tcg_gen_dupi_vec(MO_64, ret, 0);
    return ret;
}

TCGv_vec tcg_const_ones_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    tcg_gen_dupi_vec(MO_64, ret, -1);
    return ret;
}

TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_zeros_vec(t->base_type);
}

TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_ones_vec(t->base_type);
}

void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    tcg_gen_mov_vec(r, tcg_constant_vec(rt->base_type, vece, a));
}

void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    if (TCG_TARGET_REG_BITS == 64) {
        TCGArg ai = tcgv_i64_arg(a);
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    } else if (vece == MO_64) {
        TCGArg al = tcgv_i32_arg(TCGV_LOW(a));
        TCGArg ah = tcgv_i32_arg(TCGV_HIGH(a));
        vec_gen_3(INDEX_op_dup2_vec, type, MO_64, ri, al, ah);
    } else {
        TCGArg ai = tcgv_i32_arg(TCGV_LOW(a));
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    }
}

void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec r, TCGv_i32 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg ai = tcgv_i32_arg(a);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
}

void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec r, TCGv_ptr b,
                         tcg_target_long ofs)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(INDEX_op_dupm_vec, type, vece, ri, bi, ofs);
}

static void vec_gen_ldst(TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(opc, type, 0, ri, bi, o);
}

void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_ld_vec, r, b, o);
}

void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_st_vec, r, b, o);
}

void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr b, TCGArg o, TCGType low_type)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    tcg_debug_assert(low_type >= TCG_TYPE_V64);
    tcg_debug_assert(low_type <= type);
    vec_gen_3(INDEX_op_st_vec, low_type, 0, ri, bi, o);
}

void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_and_vec, 0, r, a, b);
}

void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_or_vec, 0, r, a, b);
}

void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_xor_vec, 0, r, a, b);
}

void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_andc_vec) {
        vec_gen_op3(INDEX_op_andc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_and_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_orc_vec) {
        vec_gen_op3(INDEX_op_orc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_or_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_nand_vec) {
        vec_gen_op3(INDEX_op_nand_vec, 0, r, a, b);
    } else {
        tcg_gen_and_vec(0, r, a, b);
        tcg_gen_not_vec(0, r, r);
    }
}

void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_nor_vec) {
        vec_gen_op3(INDEX_op_nor_vec, 0, r, a, b);
    } else {
        tcg_gen_or_vec(0, r, a, b);
        tcg_gen_not_vec(0, r, r);
    }
}

void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_eqv_vec) {
        vec_gen_op3(INDEX_op_eqv_vec, 0, r, a, b);
    } else {
        tcg_gen_xor_vec(0, r, a, b);
        tcg_gen_not_vec(0, r, r);
    }
}

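/*
 * Editorial note: tcg_can_emit_vec_op() is three-valued. A positive
 * result means the host supports the opcode directly; a negative result
 * means the backend can expand it via tcg_expand_vec_op(); zero means
 * the opcode is unsupported, in which case helpers such as do_op2()
 * return false and the caller must supply its own fallback.
 */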
static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_2(opc, type, vece, ri, ai);
    } else if (can < 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_expand_vec_op(opc, type, vece, ri, ai);
        tcg_swap_vecop_list(hold_list);
    } else {
        return false;
    }
    return true;
}

void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);

    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
        TCGv_vec t = tcg_const_ones_vec_matching(r);
        tcg_gen_xor_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list;

    tcg_assert_listed_vecop(INDEX_op_neg_vec);
    hold_list = tcg_swap_vecop_list(NULL);

    if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
        TCGv_vec t = tcg_const_zeros_vec_matching(r);
        tcg_gen_sub_vec(vece, r, t, a);
        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list;

    tcg_assert_listed_vecop(INDEX_op_abs_vec);
    hold_list = tcg_swap_vecop_list(NULL);

    if (!do_op2(vece, r, a, INDEX_op_abs_vec)) {
        TCGType type = tcgv_vec_temp(r)->base_type;
        TCGv_vec t = tcg_temp_new_vec(type);

        tcg_debug_assert(tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece));
        if (tcg_can_emit_vec_op(INDEX_op_smax_vec, type, vece) > 0) {
            tcg_gen_neg_vec(vece, t, a);
            tcg_gen_smax_vec(vece, r, a, t);
        } else {
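            /*
             * Editorial note: fill t with the sign of each element,
             * all-ones if negative and zero otherwise; then
             * abs(a) = (a ^ t) - t, which conditionally negates a.
             */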
            if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
                tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
            } else {
                tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a,
                                tcg_constant_vec(type, vece, 0));
            }
            tcg_gen_xor_vec(vece, r, a, t);
            tcg_gen_sub_vec(vece, r, r, t);
        }

        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

static void do_shifti(TCGOpcode opc, unsigned vece,
                      TCGv_vec r, TCGv_vec a, int64_t i)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type == type);
    tcg_debug_assert(i >= 0 && i < (8 << vece));
    tcg_assert_listed_vecop(opc);

    if (i == 0) {
        tcg_gen_mov_vec(r, a);
        return;
    }

    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, i);
    } else {
        /* We leave the choice of expansion via scalar or vector shift
           to the target. Often, but not always, dupi can feed a vector
           shift more easily than a scalar. */
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, i);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shli_vec, vece, r, a, i);
}

void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shri_vec, vece, r, a, i);
}

void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_sari_vec, vece, r, a, i);
}

void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_rotli_vec, vece, r, a, i);
}

void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    int bits = 8 << vece;
    tcg_debug_assert(i >= 0 && i < bits);
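    /* Editorial note: a right rotate by i is a left rotate by bits - i;
       -i & (bits - 1) computes that modulo the element width, and also
       maps i == 0 to a left rotate of 0. */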
    do_shifti(INDEX_op_rotli_vec, vece, r, a, -i & (bits - 1));
}

void tcg_gen_cmp_vec(TCGCond cond, unsigned vece,
                     TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(INDEX_op_cmp_vec);
    can = tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece);
    if (can > 0) {
        vec_gen_4(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
        tcg_swap_vecop_list(hold_list);
    }
}

static bool do_op3(unsigned vece, TCGv_vec r, TCGv_vec a,
                   TCGv_vec b, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, bi);
    } else if (can < 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_expand_vec_op(opc, type, vece, ri, ai, bi);
        tcg_swap_vecop_list(hold_list);
    } else {
        return false;
    }
    return true;
}

static void do_op3_nofail(unsigned vece, TCGv_vec r, TCGv_vec a,
                          TCGv_vec b, TCGOpcode opc)
{
    bool ok = do_op3(vece, r, a, b, opc);
    tcg_debug_assert(ok);
}

void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_add_vec);
}

void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_sub_vec);
}

void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_mul_vec);
}

void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_ssadd_vec);
}

void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (!do_op3(vece, r, a, b, INDEX_op_usadd_vec)) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        TCGv_vec t = tcg_temp_new_vec_matching(r);

        /* usadd(a, b) = min(a, ~b) + b */
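        /*
         * Editorial note: ~b is the largest x for which x + b does not
         * overflow. If a <= ~b the sum is exact; otherwise the min
         * clamps to ~b, and ~b + b = all-ones, the saturated value.
         */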
        tcg_gen_not_vec(vece, t, b);
        tcg_gen_umin_vec(vece, t, t, a);
        tcg_gen_add_vec(vece, r, t, b);

        tcg_temp_free_vec(t);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_sssub_vec);
}

void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (!do_op3(vece, r, a, b, INDEX_op_ussub_vec)) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        TCGv_vec t = tcg_temp_new_vec_matching(r);

        /* ussub(a, b) = max(a, b) - b */
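        /*
         * Editorial note: if a >= b, max picks a and the result is the
         * exact difference a - b; if a < b, max picks b and the result
         * is b - b = 0, the saturated value.
         */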
        tcg_gen_umax_vec(vece, t, a, b);
        tcg_gen_sub_vec(vece, r, t, b);

        tcg_temp_free_vec(t);
        tcg_swap_vecop_list(hold_list);
    }
}

static void do_minmax(unsigned vece, TCGv_vec r, TCGv_vec a,
                      TCGv_vec b, TCGOpcode opc, TCGCond cond)
{
    if (!do_op3(vece, r, a, b, opc)) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_gen_cmpsel_vec(cond, vece, r, a, b, a, b);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_minmax(vece, r, a, b, INDEX_op_smin_vec, TCG_COND_LT);
}

void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_minmax(vece, r, a, b, INDEX_op_umin_vec, TCG_COND_LTU);
}

void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_minmax(vece, r, a, b, INDEX_op_smax_vec, TCG_COND_GT);
}

void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_minmax(vece, r, a, b, INDEX_op_umax_vec, TCG_COND_GTU);
}

void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_shlv_vec);
}

void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_shrv_vec);
}

void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_sarv_vec);
}

void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_rotlv_vec);
}

void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3_nofail(vece, r, a, b, INDEX_op_rotrv_vec);
}

static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a,
                      TCGv_i32 s, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *st = tcgv_i32_temp(s);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg si = temp_arg(st);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, si);
    } else if (can < 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_expand_vec_op(opc, type, vece, ri, ai, si);
        tcg_swap_vecop_list(hold_list);
    } else {
        g_assert_not_reached();
    }
}

void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
{
    do_shifts(vece, r, a, b, INDEX_op_shls_vec);
}

void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
{
    do_shifts(vece, r, a, b, INDEX_op_shrs_vec);
}

void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
{
    do_shifts(vece, r, a, b, INDEX_op_sars_vec);
}

void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s)
{
    do_shifts(vece, r, a, s, INDEX_op_rotls_vec);
}

void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
                        TCGv_vec b, TCGv_vec c)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGTemp *ct = tcgv_vec_temp(c);
    TCGType type = rt->base_type;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_debug_assert(ct->base_type >= type);

    if (TCG_TARGET_HAS_bitsel_vec) {
        vec_gen_4(INDEX_op_bitsel_vec, type, MO_8,
                  temp_arg(rt), temp_arg(at), temp_arg(bt), temp_arg(ct));
    } else {
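        /* Editorial note: bitsel(a, b, c) = (a & b) | (~a & c),
           selecting bits from b where a is set and from c elsewhere. */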
        TCGv_vec t = tcg_temp_new_vec(type);
        tcg_gen_and_vec(MO_8, t, a, b);
        tcg_gen_andc_vec(MO_8, r, c, a);
        tcg_gen_or_vec(MO_8, r, r, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_cmpsel_vec(TCGCond cond, unsigned vece, TCGv_vec r,
                        TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGTemp *ct = tcgv_vec_temp(c);
    TCGTemp *dt = tcgv_vec_temp(d);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGArg ci = temp_arg(ct);
    TCGArg di = temp_arg(dt);
    TCGType type = rt->base_type;
    const TCGOpcode *hold_list;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_debug_assert(ct->base_type >= type);
    tcg_debug_assert(dt->base_type >= type);

    tcg_assert_listed_vecop(INDEX_op_cmpsel_vec);
    hold_list = tcg_swap_vecop_list(NULL);
    can = tcg_can_emit_vec_op(INDEX_op_cmpsel_vec, type, vece);

    if (can > 0) {
        vec_gen_6(INDEX_op_cmpsel_vec, type, vece, ri, ai, bi, ci, di, cond);
    } else if (can < 0) {
        tcg_expand_vec_op(INDEX_op_cmpsel_vec, type, vece,
                          ri, ai, bi, ci, di, cond);
    } else {
        TCGv_vec t = tcg_temp_new_vec(type);
        tcg_gen_cmp_vec(cond, vece, t, a, b);
        tcg_gen_bitsel_vec(vece, r, t, c, d);
        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}