/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 Linaro, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"

/* Reduce the number of ifdefs below. This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate. */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
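
/*
 * A sketch of what this poisoning catches (illustrative, not part of
 * the build): on a 64-bit host, code such as
 *
 *     TCGv_i32 lo = TCGV_LOW(val);
 *
 * inside a branch the compiler eliminates still type-checks, but any
 * use that survives dead-code elimination now references the undefined
 * symbol TCGV_LOW_link_error and fails at link time.
 */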

/*
 * Vector optional opcode tracking.
 * Except for the basic logical operations (and, or, xor), and
 * data movement (mov, ld, st, dupi), many vector opcodes are
 * optional and may not be supported on the host. Thank Intel
 * for the irregularity in their instruction set.
 *
 * The gvec expanders allow custom vector operations to be composed,
 * generally via the .fniv callback in the GVecGen* structures. At
 * the same time, in deciding whether to use this hook we need to
 * know if the host supports the required operations. This is
 * presented as an array of opcodes, terminated by 0. Each opcode
 * is assumed to be expanded with the given VECE.
 *
 * For debugging, we want to validate this array. Therefore, when
 * tcg_ctx->vecop_list is non-NULL, the tcg_gen_*_vec expanders
 * will validate that their opcode is present in the list.
 */
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode op)
{
    const TCGOpcode *p = tcg_ctx->vecop_list;
    if (p) {
        for (; *p; ++p) {
            if (*p == op) {
                return;
            }
        }
        g_assert_not_reached();
    }
}
#endif
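
/*
 * A sketch of how a caller supplies such a list (the variable names
 * here are illustrative; the opcodes and helpers are the real ones):
 *
 *     static const TCGOpcode neg_list[] = {
 *         INDEX_op_neg_vec, INDEX_op_sub_vec, 0
 *     };
 *     const TCGOpcode *hold = tcg_swap_vecop_list(neg_list);
 *     tcg_gen_neg_vec(vece, r, a);
 *     tcg_swap_vecop_list(hold);
 *
 * The list names both neg_vec and its sub_vec fallback, mirroring the
 * expansion performed by tcg_gen_neg_vec below.
 */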

bool tcg_can_emit_vecop_list(const TCGOpcode *list,
                             TCGType type, unsigned vece)
{
    if (list == NULL) {
        return true;
    }

    for (; *list; ++list) {
        TCGOpcode opc = *list;

#ifdef CONFIG_DEBUG_TCG
        switch (opc) {
        case INDEX_op_and_vec:
        case INDEX_op_or_vec:
        case INDEX_op_xor_vec:
        case INDEX_op_mov_vec:
        case INDEX_op_dup_vec:
        case INDEX_op_dupi_vec:
        case INDEX_op_dup2_vec:
        case INDEX_op_ld_vec:
        case INDEX_op_st_vec:
            /* These opcodes are mandatory and should not be listed. */
            g_assert_not_reached();
        default:
            break;
        }
#endif

        if (tcg_can_emit_vec_op(opc, type, vece)) {
            continue;
        }

        /*
         * The opcode list is created by front ends based on what they
         * actually invoke. We must mirror the logic in the routines
         * below for generic expansions using other opcodes.
         */
        switch (opc) {
        case INDEX_op_neg_vec:
            if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)) {
                continue;
            }
            break;
        default:
            break;
        }
        return false;
    }
    return true;
}

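/*
 * Raw opcode emitters. TCGOP_VECL stores the vector type as an offset
 * from TCG_TYPE_V64 (with V64/V128/V256 consecutive, 0/1/2 select a
 * 64/128/256-bit vector); TCGOP_VECE stores the element size as a MemOp.
 */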
void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
}

void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
}

void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b, TCGArg c)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
    op->args[3] = c;
}

static void vec_gen_op2(TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output. */
    tcg_debug_assert(at->base_type >= type);
    vec_gen_2(opc, type, vece, temp_arg(rt), temp_arg(at));
}

static void vec_gen_op3(TCGOpcode opc, unsigned vece,
                        TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output. */
    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    vec_gen_3(opc, type, vece, temp_arg(rt), temp_arg(at), temp_arg(bt));
}

void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a)
{
    if (r != a) {
        vec_gen_op2(INDEX_op_mov_vec, 0, r, a);
    }
}

#define MO_REG (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32)
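
/* MO_REG is the MemOp matching the host register width; dup_const()
   replicates a constant to that width before do_dupi_vec() emits it. */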

static void do_dupi_vec(TCGv_vec r, unsigned vece, TCGArg a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    vec_gen_2(INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a);
}

TCGv_vec tcg_const_zeros_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    do_dupi_vec(ret, MO_REG, 0);
    return ret;
}

TCGv_vec tcg_const_ones_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    do_dupi_vec(ret, MO_REG, -1);
    return ret;
}

TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_zeros_vec(t->base_type);
}

TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_ones_vec(t->base_type);
}

void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a)
{
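    /* The deposit64 test checks whether the high and low 32 bits of 'a'
       are equal, i.e. the constant already replicates at MO_32. */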
    if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) {
        do_dupi_vec(r, MO_32, a);
    } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) {
        do_dupi_vec(r, MO_64, a);
    } else {
        TCGv_i64 c = tcg_const_i64(a);
        tcg_gen_dup_i64_vec(MO_64, r, c);
        tcg_temp_free_i64(c);
    }
}

void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_32, a));
}

void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_16, a));
}

void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_8, a));
}

void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(vece, a));
}

void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    if (TCG_TARGET_REG_BITS == 64) {
        TCGArg ai = tcgv_i64_arg(a);
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    } else if (vece == MO_64) {
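        /* On a 32-bit host, the 64-bit input arrives as two 32-bit
           halves; dup2_vec rebuilds and duplicates the value from them. */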
        TCGArg al = tcgv_i32_arg(TCGV_LOW(a));
        TCGArg ah = tcgv_i32_arg(TCGV_HIGH(a));
        vec_gen_3(INDEX_op_dup2_vec, type, MO_64, ri, al, ah);
    } else {
        TCGArg ai = tcgv_i32_arg(TCGV_LOW(a));
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    }
}

void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec r, TCGv_i32 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg ai = tcgv_i32_arg(a);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
}

static void vec_gen_ldst(TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(opc, type, 0, ri, bi, o);
}

void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_ld_vec, r, b, o);
}

void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_st_vec, r, b, o);
}

void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr b, TCGArg o, TCGType low_type)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    tcg_debug_assert(low_type >= TCG_TYPE_V64);
    tcg_debug_assert(low_type <= type);
    vec_gen_3(INDEX_op_st_vec, low_type, 0, ri, bi, o);
}

void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_and_vec, 0, r, a, b);
}

void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_or_vec, 0, r, a, b);
}

void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_xor_vec, 0, r, a, b);
}

void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_andc_vec) {
        vec_gen_op3(INDEX_op_andc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_and_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_orc_vec) {
        vec_gen_op3(INDEX_op_orc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_or_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend that supports it. */
    tcg_gen_and_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend that supports it. */
    tcg_gen_or_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend that supports it. */
    tcg_gen_xor_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_assert_listed_vecop(opc);
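    /*
     * tcg_can_emit_vec_op returns positive if the host supports the
     * opcode directly, negative if the backend can expand it via
     * tcg_expand_vec_op, and zero if it cannot be implemented.
     */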
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_2(opc, type, vece, ri, ai);
    } else if (can < 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_expand_vec_op(opc, type, vece, ri, ai);
        tcg_swap_vecop_list(hold_list);
    } else {
        return false;
    }
    return true;
}

void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
        TCGv_vec t = tcg_const_ones_vec_matching(r);
        tcg_gen_xor_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list;

    tcg_assert_listed_vecop(INDEX_op_neg_vec);
    hold_list = tcg_swap_vecop_list(NULL);

    if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
        TCGv_vec t = tcg_const_zeros_vec_matching(r);
        tcg_gen_sub_vec(vece, r, t, a);
        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

static void do_shifti(TCGOpcode opc, unsigned vece,
                      TCGv_vec r, TCGv_vec a, int64_t i)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type == type);
    tcg_debug_assert(i >= 0 && i < (8 << vece));
    tcg_assert_listed_vecop(opc);

    if (i == 0) {
        tcg_gen_mov_vec(r, a);
        return;
    }

    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, i);
    } else {
        /* We leave the choice of expansion via scalar or vector shift
           to the target. Often, but not always, dupi can feed a vector
           shift more easily than a scalar. */
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, i);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shli_vec, vece, r, a, i);
}

void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shri_vec, vece, r, a, i);
}

void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_sari_vec, vece, r, a, i);
}
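/* Compare vectors by element, setting each element of R to -1 (all bits
   set) where A <cond> B holds and to 0 where it does not. */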
void tcg_gen_cmp_vec(TCGCond cond, unsigned vece,
                     TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(INDEX_op_cmp_vec);
    can = tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece);
    if (can > 0) {
        vec_gen_4(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
        tcg_swap_vecop_list(hold_list);
    }
}

static void do_op3(unsigned vece, TCGv_vec r, TCGv_vec a,
                   TCGv_vec b, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, bi);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, bi);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_add_vec);
}

void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sub_vec);
}

void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_mul_vec);
}

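/* Saturating arithmetic: the 'ss'/'us' prefixes denote signed and
   unsigned saturation; results clamp at the type bounds instead of
   wrapping. */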
void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_ssadd_vec);
}

void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_usadd_vec);
}

void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sssub_vec);
}

void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_ussub_vec);
}

void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_smin_vec);
}

void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_umin_vec);
}

void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_smax_vec);
}

void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_umax_vec);
}