tcg/tcg-op-vec.c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 Linaro, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"

/* Reduce the number of ifdefs below. This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate. */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
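
/*
 * Illustrative note (editorial addition, not from the original source):
 * the link-error trick above relies on guards such as
 *
 *     if (TCG_TARGET_REG_BITS == 32) {
 *         ... TCGV_LOW(a) ...;
 *     }
 *
 * On a 64-bit host the condition is constant-false, the compiler removes
 * the branch, and the *_link_error symbols are never referenced at link
 * time.
 */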

/*
 * Vector optional opcode tracking.
 * Except for the basic logical operations (and, or, xor), and
 * data movement (mov, ld, st, dupi), many vector opcodes are
 * optional and may not be supported on the host. Thank Intel
 * for the irregularity in their instruction set.
 *
 * The gvec expanders allow custom vector operations to be composed,
 * generally via the .fniv callback in the GVecGen* structures. At
 * the same time, in deciding whether to use this hook we need to
 * know if the host supports the required operations. This is
 * presented as an array of opcodes, terminated by 0. Each opcode
 * is assumed to be expanded with the given VECE.
 *
 * For debugging, we want to validate this array. Therefore, when
 * tcg_ctx->vecop_list is non-NULL, the tcg_gen_*_vec expanders
 * will validate that their opcode is present in the list.
 */
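
/*
 * Illustrative example (editorial addition, not from the original file):
 * a front end would typically declare such a list and install it around
 * an expansion, roughly like
 *
 *     static const TCGOpcode neg_list[] = { INDEX_op_neg_vec, 0 };
 *
 *     const TCGOpcode *hold = tcg_swap_vecop_list(neg_list);
 *     tcg_gen_neg_vec(vece, r, a);
 *     tcg_swap_vecop_list(hold);
 *
 * The list name above is hypothetical; real lists are supplied by the
 * per-target front ends, e.g. through the gvec .opt_opc field.
 */
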
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode op)
{
    const TCGOpcode *p = tcg_ctx->vecop_list;
    if (p) {
        for (; *p; ++p) {
            if (*p == op) {
                return;
            }
        }
        g_assert_not_reached();
    }
}
#endif

bool tcg_can_emit_vecop_list(const TCGOpcode *list,
                             TCGType type, unsigned vece)
{
    if (list == NULL) {
        return true;
    }

    for (; *list; ++list) {
        TCGOpcode opc = *list;

#ifdef CONFIG_DEBUG_TCG
        switch (opc) {
        case INDEX_op_and_vec:
        case INDEX_op_or_vec:
        case INDEX_op_xor_vec:
        case INDEX_op_mov_vec:
        case INDEX_op_dup_vec:
        case INDEX_op_dupi_vec:
        case INDEX_op_dup2_vec:
        case INDEX_op_ld_vec:
        case INDEX_op_st_vec:
            /* These opcodes are mandatory and should not be listed. */
            g_assert_not_reached();
        default:
            break;
        }
#endif

        if (tcg_can_emit_vec_op(opc, type, vece)) {
            continue;
        }

        /*
         * The opcode list is created by front ends based on what they
         * actually invoke. We must mirror the logic in the routines
         * below for generic expansions using other opcodes.
         */
        switch (opc) {
        case INDEX_op_neg_vec:
            if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)) {
                continue;
            }
            break;
        default:
            break;
        }
        return false;
    }
    return true;
}

void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
}

void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
}

void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b, TCGArg c)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
    op->args[3] = c;
}

static void vec_gen_op2(TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output. */
    tcg_debug_assert(at->base_type >= type);
    vec_gen_2(opc, type, vece, temp_arg(rt), temp_arg(at));
}

static void vec_gen_op3(TCGOpcode opc, unsigned vece,
                        TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output. */
    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    vec_gen_3(opc, type, vece, temp_arg(rt), temp_arg(at), temp_arg(bt));
}

void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a)
{
    if (r != a) {
        vec_gen_op2(INDEX_op_mov_vec, 0, r, a);
    }
}

#define MO_REG (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32)

static void do_dupi_vec(TCGv_vec r, unsigned vece, TCGArg a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    vec_gen_2(INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a);
}

TCGv_vec tcg_const_zeros_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    do_dupi_vec(ret, MO_REG, 0);
    return ret;
}

TCGv_vec tcg_const_ones_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    do_dupi_vec(ret, MO_REG, -1);
    return ret;
}

TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_zeros_vec(t->base_type);
}

TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_ones_vec(t->base_type);
}

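/*
 * Editorial note: tcg_gen_dup64i_vec below chooses the cheapest way to
 * materialize a replicated 64-bit constant. On a 32-bit host, a value
 * whose two 32-bit halves are equal needs only a 32-bit dupi; a value
 * representable as a sign-extended 32-bit immediate (or any value on a
 * 64-bit host) can use a 64-bit dupi directly; otherwise the constant is
 * first built in a TCGv_i64 and then broadcast with tcg_gen_dup_i64_vec.
 */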
void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a)
{
    if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) {
        do_dupi_vec(r, MO_32, a);
    } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) {
        do_dupi_vec(r, MO_64, a);
    } else {
        TCGv_i64 c = tcg_const_i64(a);
        tcg_gen_dup_i64_vec(MO_64, r, c);
        tcg_temp_free_i64(c);
    }
}

void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_32, a));
}

void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_16, a));
}

void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_8, a));
}

void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(vece, a));
}

void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    if (TCG_TARGET_REG_BITS == 64) {
        TCGArg ai = tcgv_i64_arg(a);
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    } else if (vece == MO_64) {
        TCGArg al = tcgv_i32_arg(TCGV_LOW(a));
        TCGArg ah = tcgv_i32_arg(TCGV_HIGH(a));
        vec_gen_3(INDEX_op_dup2_vec, type, MO_64, ri, al, ah);
    } else {
        TCGArg ai = tcgv_i32_arg(TCGV_LOW(a));
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    }
}

void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec r, TCGv_i32 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg ai = tcgv_i32_arg(a);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
}

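/*
 * Editorial note: dupm replicates a single vece-sized element, loaded
 * from host memory at b + ofs, across every lane of the destination
 * vector.
 */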
void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec r, TCGv_ptr b,
                         tcg_target_long ofs)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(INDEX_op_dupm_vec, type, vece, ri, bi, ofs);
}

static void vec_gen_ldst(TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(opc, type, 0, ri, bi, o);
}

void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_ld_vec, r, b, o);
}

void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_st_vec, r, b, o);
}

void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr b, TCGArg o, TCGType low_type)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    tcg_debug_assert(low_type >= TCG_TYPE_V64);
    tcg_debug_assert(low_type <= type);
    vec_gen_3(INDEX_op_st_vec, low_type, 0, ri, bi, o);
}

void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_and_vec, 0, r, a, b);
}

void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_or_vec, 0, r, a, b);
}

void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_xor_vec, 0, r, a, b);
}

void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_andc_vec) {
        vec_gen_op3(INDEX_op_andc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_and_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_orc_vec) {
        vec_gen_op3(INDEX_op_orc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_or_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_nand_vec when a backend supports it. */
    tcg_gen_and_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_nor_vec when a backend supports it. */
    tcg_gen_or_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_eqv_vec when a backend supports it. */
    tcg_gen_xor_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

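/*
 * Editorial note: tcg_can_emit_vec_op() returns a tri-state value:
 * > 0 if the host supports the opcode directly, < 0 if the backend can
 * expand it via tcg_expand_vec_op(), and 0 if it is unsupported. The
 * helpers below follow that convention.
 */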
static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_2(opc, type, vece, ri, ai);
    } else if (can < 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_expand_vec_op(opc, type, vece, ri, ai);
        tcg_swap_vecop_list(hold_list);
    } else {
        return false;
    }
    return true;
}

void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
        TCGv_vec t = tcg_const_ones_vec_matching(r);
        tcg_gen_xor_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list;

    tcg_assert_listed_vecop(INDEX_op_neg_vec);
    hold_list = tcg_swap_vecop_list(NULL);

    if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
        TCGv_vec t = tcg_const_zeros_vec_matching(r);
        tcg_gen_sub_vec(vece, r, t, a);
        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

static void do_shifti(TCGOpcode opc, unsigned vece,
                      TCGv_vec r, TCGv_vec a, int64_t i)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type == type);
    tcg_debug_assert(i >= 0 && i < (8 << vece));
    tcg_assert_listed_vecop(opc);

    if (i == 0) {
        tcg_gen_mov_vec(r, a);
        return;
    }

    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, i);
    } else {
        /* We leave the choice of expansion via scalar or vector shift
           to the target. Often, but not always, dupi can feed a vector
           shift more easily than a scalar. */
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, i);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shli_vec, vece, r, a, i);
}

void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shri_vec, vece, r, a, i);
}

void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_sari_vec, vece, r, a, i);
}

void tcg_gen_cmp_vec(TCGCond cond, unsigned vece,
                     TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(INDEX_op_cmp_vec);
    can = tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece);
    if (can > 0) {
        vec_gen_4(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
        tcg_swap_vecop_list(hold_list);
    }
}

static void do_op3(unsigned vece, TCGv_vec r, TCGv_vec a,
                   TCGv_vec b, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, bi);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, bi);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_add_vec);
}

void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sub_vec);
}

void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_mul_vec);
}

void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_ssadd_vec);
}

void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_usadd_vec);
}

void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sssub_vec);
}

void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_ussub_vec);
}

void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_smin_vec);
}

void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_umin_vec);
}

void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_smax_vec);
}

void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_umax_vec);