1 /*
2 * ARM translation: AArch32 Neon instructions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 * Copyright (c) 2020 Linaro, Ltd.
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 */
22
23 /*
24 * This file is intended to be included from translate.c; it uses
25 * some macros and definitions provided by that file.
26 * It might be possible to convert it to a standalone .c file eventually.
27 */
28
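/*
 * Trivial helpers intended for the generated decoder (as !function
 * converters): plus1() recovers a count that is encoded as one less
 * than its value, and rsub_N() recovers a right-shift amount that is
 * encoded as N minus the shift.
 */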
29 static inline int plus1(DisasContext *s, int x)
30 {
31 return x + 1;
32 }
33
34 static inline int rsub_64(DisasContext *s, int x)
35 {
36 return 64 - x;
37 }
38
39 static inline int rsub_32(DisasContext *s, int x)
40 {
41 return 32 - x;
42 }
43 static inline int rsub_16(DisasContext *s, int x)
44 {
45 return 16 - x;
46 }
47 static inline int rsub_8(DisasContext *s, int x)
48 {
49 return 8 - x;
50 }
51
52 /* Include the generated Neon decoder */
53 #include "decode-neon-dp.inc.c"
54 #include "decode-neon-ls.inc.c"
55 #include "decode-neon-shared.inc.c"
56
57 static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
58 {
59 int opr_sz;
60 TCGv_ptr fpst;
61 gen_helper_gvec_3_ptr *fn_gvec_ptr;
62
63 if (!dc_isar_feature(aa32_vcma, s)
64 || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
65 return false;
66 }
67
68 /* UNDEF accesses to D16-D31 if they don't exist. */
69 if (!dc_isar_feature(aa32_simd_r32, s) &&
70 ((a->vd | a->vn | a->vm) & 0x10)) {
71 return false;
72 }
73
74 if ((a->vn | a->vm | a->vd) & a->q) {
75 return false;
76 }
77
78 if (!vfp_access_check(s)) {
79 return true;
80 }
81
82 opr_sz = (1 + a->q) * 8;
83 fpst = get_fpstatus_ptr(1);
84 fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
85 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
86 vfp_reg_offset(1, a->vn),
87 vfp_reg_offset(1, a->vm),
88 fpst, opr_sz, opr_sz, a->rot,
89 fn_gvec_ptr);
90 tcg_temp_free_ptr(fpst);
91 return true;
92 }
93
94 static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
95 {
96 int opr_sz;
97 TCGv_ptr fpst;
98 gen_helper_gvec_3_ptr *fn_gvec_ptr;
99
100 if (!dc_isar_feature(aa32_vcma, s)
101 || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
102 return false;
103 }
104
105 /* UNDEF accesses to D16-D31 if they don't exist. */
106 if (!dc_isar_feature(aa32_simd_r32, s) &&
107 ((a->vd | a->vn | a->vm) & 0x10)) {
108 return false;
109 }
110
111 if ((a->vn | a->vm | a->vd) & a->q) {
112 return false;
113 }
114
115 if (!vfp_access_check(s)) {
116 return true;
117 }
118
119 opr_sz = (1 + a->q) * 8;
120 fpst = get_fpstatus_ptr(1);
121 fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
122 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
123 vfp_reg_offset(1, a->vn),
124 vfp_reg_offset(1, a->vm),
125 fpst, opr_sz, opr_sz, a->rot,
126 fn_gvec_ptr);
127 tcg_temp_free_ptr(fpst);
128 return true;
129 }
130
131 static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
132 {
133 int opr_sz;
134 gen_helper_gvec_3 *fn_gvec;
135
136 if (!dc_isar_feature(aa32_dp, s)) {
137 return false;
138 }
139
140 /* UNDEF accesses to D16-D31 if they don't exist. */
141 if (!dc_isar_feature(aa32_simd_r32, s) &&
142 ((a->vd | a->vn | a->vm) & 0x10)) {
143 return false;
144 }
145
146 if ((a->vn | a->vm | a->vd) & a->q) {
147 return false;
148 }
149
150 if (!vfp_access_check(s)) {
151 return true;
152 }
153
154 opr_sz = (1 + a->q) * 8;
155 fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
156 tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
157 vfp_reg_offset(1, a->vn),
158 vfp_reg_offset(1, a->vm),
159 opr_sz, opr_sz, 0, fn_gvec);
160 return true;
161 }
162
163 static bool trans_VFML(DisasContext *s, arg_VFML *a)
164 {
165 int opr_sz;
166
167 if (!dc_isar_feature(aa32_fhm, s)) {
168 return false;
169 }
170
171 /* UNDEF accesses to D16-D31 if they don't exist. */
172 if (!dc_isar_feature(aa32_simd_r32, s) &&
173 (a->vd & 0x10)) {
174 return false;
175 }
176
177 if (a->vd & a->q) {
178 return false;
179 }
180
181 if (!vfp_access_check(s)) {
182 return true;
183 }
184
185 opr_sz = (1 + a->q) * 8;
186 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
187 vfp_reg_offset(a->q, a->vn),
188 vfp_reg_offset(a->q, a->vm),
189 cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
190 gen_helper_gvec_fmlal_a32);
191 return true;
192 }
193
194 static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
195 {
196 gen_helper_gvec_3_ptr *fn_gvec_ptr;
197 int opr_sz;
198 TCGv_ptr fpst;
199
200 if (!dc_isar_feature(aa32_vcma, s)) {
201 return false;
202 }
203 if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
204 return false;
205 }
206
207 /* UNDEF accesses to D16-D31 if they don't exist. */
208 if (!dc_isar_feature(aa32_simd_r32, s) &&
209 ((a->vd | a->vn | a->vm) & 0x10)) {
210 return false;
211 }
212
213 if ((a->vd | a->vn) & a->q) {
214 return false;
215 }
216
217 if (!vfp_access_check(s)) {
218 return true;
219 }
220
221 fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
222 : gen_helper_gvec_fcmlah_idx);
223 opr_sz = (1 + a->q) * 8;
224 fpst = get_fpstatus_ptr(1);
225 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
226 vfp_reg_offset(1, a->vn),
227 vfp_reg_offset(1, a->vm),
228 fpst, opr_sz, opr_sz,
229 (a->index << 2) | a->rot, fn_gvec_ptr);
230 tcg_temp_free_ptr(fpst);
231 return true;
232 }
233
234 static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
235 {
236 gen_helper_gvec_3 *fn_gvec;
237 int opr_sz;
238 TCGv_ptr fpst;
239
240 if (!dc_isar_feature(aa32_dp, s)) {
241 return false;
242 }
243
244 /* UNDEF accesses to D16-D31 if they don't exist. */
245 if (!dc_isar_feature(aa32_simd_r32, s) &&
246 ((a->vd | a->vn) & 0x10)) {
247 return false;
248 }
249
250 if ((a->vd | a->vn) & a->q) {
251 return false;
252 }
253
254 if (!vfp_access_check(s)) {
255 return true;
256 }
257
258 fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
259 opr_sz = (1 + a->q) * 8;
260 fpst = get_fpstatus_ptr(1);
261 tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
262 vfp_reg_offset(1, a->vn),
263 vfp_reg_offset(1, a->rm),
264 opr_sz, opr_sz, a->index, fn_gvec);
265 tcg_temp_free_ptr(fpst);
266 return true;
267 }
268
269 static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
270 {
271 int opr_sz;
272
273 if (!dc_isar_feature(aa32_fhm, s)) {
274 return false;
275 }
276
277 /* UNDEF accesses to D16-D31 if they don't exist. */
278 if (!dc_isar_feature(aa32_simd_r32, s) &&
279 ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
280 return false;
281 }
282
283 if (a->vd & a->q) {
284 return false;
285 }
286
287 if (!vfp_access_check(s)) {
288 return true;
289 }
290
291 opr_sz = (1 + a->q) * 8;
292 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
293 vfp_reg_offset(a->q, a->vn),
294 vfp_reg_offset(a->q, a->rm),
295 cpu_env, opr_sz, opr_sz,
296 (a->index << 2) | a->s, /* is_2 == 0 */
297 gen_helper_gvec_fmlal_idx_a32);
298 return true;
299 }
300
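/*
 * Descriptive note: this table is indexed by the itype field of the
 * "load/store multiple structures" insns (see trans_VLDST_multiple
 * below). The total number of D registers transferred is
 * nregs * interleave, and the registers holding the elements of one
 * structure are 'spacing' apart.
 */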
301 static struct {
302 int nregs;
303 int interleave;
304 int spacing;
305 } const neon_ls_element_type[11] = {
306 {1, 4, 1},
307 {1, 4, 2},
308 {4, 1, 1},
309 {2, 2, 2},
310 {1, 3, 1},
311 {1, 3, 2},
312 {3, 1, 1},
313 {1, 1, 1},
314 {1, 2, 1},
315 {1, 2, 2},
316 {2, 1, 1}
317 };
318
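/*
 * Base-register writeback for the element/structure load/store insns:
 * rm == 15 means no writeback, rm == 13 means writeback by the
 * constant stride computed by the caller, and any other rm means
 * post-index by the value of register rm.
 */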
319 static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
320 int stride)
321 {
322 if (rm != 15) {
323 TCGv_i32 base;
324
325 base = load_reg(s, rn);
326 if (rm == 13) {
327 tcg_gen_addi_i32(base, base, stride);
328 } else {
329 TCGv_i32 index;
330 index = load_reg(s, rm);
331 tcg_gen_add_i32(base, base, index);
332 tcg_temp_free_i32(index);
333 }
334 store_reg(s, rn, base);
335 }
336 }
337
338 static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
339 {
340 /* Neon load/store multiple structures */
341 int nregs, interleave, spacing, reg, n;
342 MemOp endian = s->be_data;
343 int mmu_idx = get_mem_index(s);
344 int size = a->size;
345 TCGv_i64 tmp64;
346 TCGv_i32 addr, tmp;
347
348 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
349 return false;
350 }
351
352 /* UNDEF accesses to D16-D31 if they don't exist */
353 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
354 return false;
355 }
356 if (a->itype > 10) {
357 return false;
358 }
359 /* Catch UNDEF cases for bad values of align field */
360 switch (a->itype & 0xc) {
361 case 4:
362 if (a->align >= 2) {
363 return false;
364 }
365 break;
366 case 8:
367 if (a->align == 3) {
368 return false;
369 }
370 break;
371 default:
372 break;
373 }
374 nregs = neon_ls_element_type[a->itype].nregs;
375 interleave = neon_ls_element_type[a->itype].interleave;
376 spacing = neon_ls_element_type[a->itype].spacing;
377 if (size == 3 && (interleave | spacing) != 1) {
378 return false;
379 }
380
381 if (!vfp_access_check(s)) {
382 return true;
383 }
384
385 /* For our purposes, bytes are always little-endian. */
386 if (size == 0) {
387 endian = MO_LE;
388 }
389 /*
390 * Consecutive little-endian elements from a single register
391 * can be promoted to a larger little-endian operation.
392 */
393 if (interleave == 1 && endian == MO_LE) {
394 size = 3;
395 }
396 tmp64 = tcg_temp_new_i64();
397 addr = tcg_temp_new_i32();
398 tmp = tcg_const_i32(1 << size);
399 load_reg_var(s, addr, a->rn);
400 for (reg = 0; reg < nregs; reg++) {
401 for (n = 0; n < 8 >> size; n++) {
402 int xs;
403 for (xs = 0; xs < interleave; xs++) {
404 int tt = a->vd + reg + spacing * xs;
405
406 if (a->l) {
407 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
408 neon_store_element64(tt, n, size, tmp64);
409 } else {
410 neon_load_element64(tmp64, tt, n, size);
411 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
412 }
413 tcg_gen_add_i32(addr, addr, tmp);
414 }
415 }
416 }
417 tcg_temp_free_i32(addr);
418 tcg_temp_free_i32(tmp);
419 tcg_temp_free_i64(tmp64);
420
421 gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
422 return true;
423 }
424
425 static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
426 {
427 /* Neon load single structure to all lanes */
428 int reg, stride, vec_size;
429 int vd = a->vd;
430 int size = a->size;
431 int nregs = a->n + 1;
432 TCGv_i32 addr, tmp;
433
434 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
435 return false;
436 }
437
438 /* UNDEF accesses to D16-D31 if they don't exist */
439 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
440 return false;
441 }
442
443 if (size == 3) {
444 if (nregs != 4 || a->a == 0) {
445 return false;
446 }
447 /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
448 size = 2;
449 }
450 if (nregs == 1 && a->a == 1 && size == 0) {
451 return false;
452 }
453 if (nregs == 3 && a->a == 1) {
454 return false;
455 }
456
457 if (!vfp_access_check(s)) {
458 return true;
459 }
460
461 /*
462 * VLD1 to all lanes: T bit indicates how many Dregs to write.
463 * VLD2/3/4 to all lanes: T bit indicates register stride.
464 */
465 stride = a->t ? 2 : 1;
466 vec_size = nregs == 1 ? stride * 8 : 8;
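/*
 * Illustrative example: VLD1 to all lanes (nregs == 1) with T set
 * replicates into two D registers, so vec_size is 16 and the gvec dup
 * below writes 16 bytes at once (unless the unaligned vd case applies).
 */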
467
468 tmp = tcg_temp_new_i32();
469 addr = tcg_temp_new_i32();
470 load_reg_var(s, addr, a->rn);
471 for (reg = 0; reg < nregs; reg++) {
472 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
473 s->be_data | size);
474 if ((vd & 1) && vec_size == 16) {
475 /*
476 * We cannot write 16 bytes at once because the
477 * destination is unaligned.
478 */
479 tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
480 8, 8, tmp);
481 tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
482 neon_reg_offset(vd, 0), 8, 8);
483 } else {
484 tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
485 vec_size, vec_size, tmp);
486 }
487 tcg_gen_addi_i32(addr, addr, 1 << size);
488 vd += stride;
489 }
490 tcg_temp_free_i32(tmp);
491 tcg_temp_free_i32(addr);
492
493 gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
494
495 return true;
496 }
497
498 static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
499 {
500 /* Neon load/store single structure to one lane */
501 int reg;
502 int nregs = a->n + 1;
503 int vd = a->vd;
504 TCGv_i32 addr, tmp;
505
506 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
507 return false;
508 }
509
510 /* UNDEF accesses to D16-D31 if they don't exist */
511 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
512 return false;
513 }
514
515 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
516 switch (nregs) {
517 case 1:
518 if (((a->align & (1 << a->size)) != 0) ||
519 (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
520 return false;
521 }
522 break;
523 case 3:
524 if ((a->align & 1) != 0) {
525 return false;
526 }
527 /* fall through */
528 case 2:
529 if (a->size == 2 && (a->align & 2) != 0) {
530 return false;
531 }
532 break;
533 case 4:
534 if ((a->size == 2) && ((a->align & 3) == 3)) {
535 return false;
536 }
537 break;
538 default:
539 abort();
540 }
541 if ((vd + a->stride * (nregs - 1)) > 31) {
542 /*
543 * Attempts to write off the end of the register file are
544 * UNPREDICTABLE; we choose to UNDEF because otherwise we would
545 * access off the end of the array that holds the register data.
546 */
547 return false;
548 }
549
550 if (!vfp_access_check(s)) {
551 return true;
552 }
553
554 tmp = tcg_temp_new_i32();
555 addr = tcg_temp_new_i32();
556 load_reg_var(s, addr, a->rn);
557 /*
558 * TODO: if we implemented alignment exceptions, we should check
559 * addr against the alignment encoded in a->align here.
560 */
561 for (reg = 0; reg < nregs; reg++) {
562 if (a->l) {
563 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
564 s->be_data | a->size);
565 neon_store_element(vd, a->reg_idx, a->size, tmp);
566 } else { /* Store */
567 neon_load_element(tmp, vd, a->reg_idx, a->size);
568 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
569 s->be_data | a->size);
570 }
571 vd += a->stride;
572 tcg_gen_addi_i32(addr, addr, 1 << a->size);
573 }
574 tcg_temp_free_i32(addr);
575 tcg_temp_free_i32(tmp);
576
577 gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
578
579 return true;
580 }
581
582 static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
583 {
584 int vec_size = a->q ? 16 : 8;
585 int rd_ofs = neon_reg_offset(a->vd, 0);
586 int rn_ofs = neon_reg_offset(a->vn, 0);
587 int rm_ofs = neon_reg_offset(a->vm, 0);
588
589 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
590 return false;
591 }
592
593 /* UNDEF accesses to D16-D31 if they don't exist. */
594 if (!dc_isar_feature(aa32_simd_r32, s) &&
595 ((a->vd | a->vn | a->vm) & 0x10)) {
596 return false;
597 }
598
599 if ((a->vn | a->vm | a->vd) & a->q) {
600 return false;
601 }
602
603 if (!vfp_access_check(s)) {
604 return true;
605 }
606
607 fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
608 return true;
609 }
610
611 #define DO_3SAME(INSN, FUNC) \
612 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
613 { \
614 return do_3same(s, a, FUNC); \
615 }
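/*
 * Illustrative example: DO_3SAME(VADD, tcg_gen_gvec_add) below expands
 * to a trans_VADD_3s() function that simply forwards to do_3same()
 * with the gvec add as the vector-op callback.
 */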
616
617 DO_3SAME(VADD, tcg_gen_gvec_add)
618 DO_3SAME(VSUB, tcg_gen_gvec_sub)
619 DO_3SAME(VAND, tcg_gen_gvec_and)
620 DO_3SAME(VBIC, tcg_gen_gvec_andc)
621 DO_3SAME(VORR, tcg_gen_gvec_or)
622 DO_3SAME(VORN, tcg_gen_gvec_orc)
623 DO_3SAME(VEOR, tcg_gen_gvec_xor)
624 DO_3SAME(VSHL_S, gen_gvec_sshl)
625 DO_3SAME(VSHL_U, gen_gvec_ushl)
626 DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
627 DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
628 DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
629 DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
630
631 /* These insns are all gvec_bitsel but with the inputs in various orders. */
632 #define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
633 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
634 uint32_t rn_ofs, uint32_t rm_ofs, \
635 uint32_t oprsz, uint32_t maxsz) \
636 { \
637 tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
638 } \
639 DO_3SAME(INSN, gen_##INSN##_3s)
640
641 DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
642 DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
643 DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
644
645 #define DO_3SAME_NO_SZ_3(INSN, FUNC) \
646 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
647 { \
648 if (a->size == 3) { \
649 return false; \
650 } \
651 return do_3same(s, a, FUNC); \
652 }
653
654 DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
655 DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
656 DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
657 DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
658 DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
659 DO_3SAME_NO_SZ_3(VMLA, gen_gvec_mla)
660 DO_3SAME_NO_SZ_3(VMLS, gen_gvec_mls)
661 DO_3SAME_NO_SZ_3(VTST, gen_gvec_cmtst)
662 DO_3SAME_NO_SZ_3(VABD_S, gen_gvec_sabd)
663 DO_3SAME_NO_SZ_3(VABA_S, gen_gvec_saba)
664 DO_3SAME_NO_SZ_3(VABD_U, gen_gvec_uabd)
665 DO_3SAME_NO_SZ_3(VABA_U, gen_gvec_uaba)
666
667 #define DO_3SAME_CMP(INSN, COND) \
668 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
669 uint32_t rn_ofs, uint32_t rm_ofs, \
670 uint32_t oprsz, uint32_t maxsz) \
671 { \
672 tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
673 } \
674 DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
675
676 DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
677 DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
678 DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
679 DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
680 DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
681
682 #define WRAP_OOL_FN(WRAPNAME, FUNC) \
683 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, \
684 uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz) \
685 { \
686 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
687 }
688
689 WRAP_OOL_FN(gen_VMUL_p_3s, gen_helper_gvec_pmul_b)
690
691 static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
692 {
693 if (a->size != 0) {
694 return false;
695 }
696 return do_3same(s, a, gen_VMUL_p_3s);
697 }
698
699 #define DO_VQRDMLAH(INSN, FUNC) \
700 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
701 { \
702 if (!dc_isar_feature(aa32_rdm, s)) { \
703 return false; \
704 } \
705 if (a->size != 1 && a->size != 2) { \
706 return false; \
707 } \
708 return do_3same(s, a, FUNC); \
709 }
710
711 DO_VQRDMLAH(VQRDMLAH, gen_gvec_sqrdmlah_qc)
712 DO_VQRDMLAH(VQRDMLSH, gen_gvec_sqrdmlsh_qc)
713
714 #define DO_SHA1(NAME, FUNC) \
715 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
716 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
717 { \
718 if (!dc_isar_feature(aa32_sha1, s)) { \
719 return false; \
720 } \
721 return do_3same(s, a, gen_##NAME##_3s); \
722 }
723
724 DO_SHA1(SHA1C, gen_helper_crypto_sha1c)
725 DO_SHA1(SHA1P, gen_helper_crypto_sha1p)
726 DO_SHA1(SHA1M, gen_helper_crypto_sha1m)
727 DO_SHA1(SHA1SU0, gen_helper_crypto_sha1su0)
728
729 #define DO_SHA2(NAME, FUNC) \
730 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
731 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
732 { \
733 if (!dc_isar_feature(aa32_sha2, s)) { \
734 return false; \
735 } \
736 return do_3same(s, a, gen_##NAME##_3s); \
737 }
738
739 DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
740 DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
741 DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)
742
743 #define DO_3SAME_64(INSN, FUNC) \
744 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
745 uint32_t rn_ofs, uint32_t rm_ofs, \
746 uint32_t oprsz, uint32_t maxsz) \
747 { \
748 static const GVecGen3 op = { .fni8 = FUNC }; \
749 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &op); \
750 } \
751 DO_3SAME(INSN, gen_##INSN##_3s)
752
753 #define DO_3SAME_64_ENV(INSN, FUNC) \
754 static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) \
755 { \
756 FUNC(d, cpu_env, n, m); \
757 } \
758 DO_3SAME_64(INSN, gen_##INSN##_elt)
759
760 DO_3SAME_64(VRSHL_S64, gen_helper_neon_rshl_s64)
761 DO_3SAME_64(VRSHL_U64, gen_helper_neon_rshl_u64)
762 DO_3SAME_64_ENV(VQSHL_S64, gen_helper_neon_qshl_s64)
763 DO_3SAME_64_ENV(VQSHL_U64, gen_helper_neon_qshl_u64)
764 DO_3SAME_64_ENV(VQRSHL_S64, gen_helper_neon_qrshl_s64)
765 DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
766
767 #define DO_3SAME_32(INSN, FUNC) \
768 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
769 uint32_t rn_ofs, uint32_t rm_ofs, \
770 uint32_t oprsz, uint32_t maxsz) \
771 { \
772 static const GVecGen3 ops[4] = { \
773 { .fni4 = gen_helper_neon_##FUNC##8 }, \
774 { .fni4 = gen_helper_neon_##FUNC##16 }, \
775 { .fni4 = gen_helper_neon_##FUNC##32 }, \
776 { 0 }, \
777 }; \
778 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
779 } \
780 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
781 { \
782 if (a->size > 2) { \
783 return false; \
784 } \
785 return do_3same(s, a, gen_##INSN##_3s); \
786 }
787
788 /*
789 * Some helper functions need to be passed the cpu_env. In order
790 * to use those with the gvec APIs like tcg_gen_gvec_3() we need
791 * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
792 * and which call a NeonGenTwoOpEnvFn().
793 */
794 #define WRAP_ENV_FN(WRAPNAME, FUNC) \
795 static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) \
796 { \
797 FUNC(d, cpu_env, n, m); \
798 }
799
800 #define DO_3SAME_32_ENV(INSN, FUNC) \
801 WRAP_ENV_FN(gen_##INSN##_tramp8, gen_helper_neon_##FUNC##8); \
802 WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##16); \
803 WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##32); \
804 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
805 uint32_t rn_ofs, uint32_t rm_ofs, \
806 uint32_t oprsz, uint32_t maxsz) \
807 { \
808 static const GVecGen3 ops[4] = { \
809 { .fni4 = gen_##INSN##_tramp8 }, \
810 { .fni4 = gen_##INSN##_tramp16 }, \
811 { .fni4 = gen_##INSN##_tramp32 }, \
812 { 0 }, \
813 }; \
814 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
815 } \
816 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
817 { \
818 if (a->size > 2) { \
819 return false; \
820 } \
821 return do_3same(s, a, gen_##INSN##_3s); \
822 }
823
824 DO_3SAME_32(VHADD_S, hadd_s)
825 DO_3SAME_32(VHADD_U, hadd_u)
826 DO_3SAME_32(VHSUB_S, hsub_s)
827 DO_3SAME_32(VHSUB_U, hsub_u)
828 DO_3SAME_32(VRHADD_S, rhadd_s)
829 DO_3SAME_32(VRHADD_U, rhadd_u)
830 DO_3SAME_32(VRSHL_S, rshl_s)
831 DO_3SAME_32(VRSHL_U, rshl_u)
832
833 DO_3SAME_32_ENV(VQSHL_S, qshl_s)
834 DO_3SAME_32_ENV(VQSHL_U, qshl_u)
835 DO_3SAME_32_ENV(VQRSHL_S, qrshl_s)
836 DO_3SAME_32_ENV(VQRSHL_U, qrshl_u)
837
838 static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
839 {
840 /* Operations handled pairwise 32 bits at a time */
841 TCGv_i32 tmp, tmp2, tmp3;
842
843 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
844 return false;
845 }
846
847 /* UNDEF accesses to D16-D31 if they don't exist. */
848 if (!dc_isar_feature(aa32_simd_r32, s) &&
849 ((a->vd | a->vn | a->vm) & 0x10)) {
850 return false;
851 }
852
853 if (a->size == 3) {
854 return false;
855 }
856
857 if (!vfp_access_check(s)) {
858 return true;
859 }
860
861 assert(a->q == 0); /* enforced by decode patterns */
862
863 /*
864 * Note that we have to be careful not to clobber the source operands
865 * in the "vm == vd" case by storing the result of the first pass too
866 * early. Since Q is 0 there are always just two passes, so instead
867 * of a complicated loop over each pass we just unroll.
868 */
869 tmp = neon_load_reg(a->vn, 0);
870 tmp2 = neon_load_reg(a->vn, 1);
871 fn(tmp, tmp, tmp2);
872 tcg_temp_free_i32(tmp2);
873
874 tmp3 = neon_load_reg(a->vm, 0);
875 tmp2 = neon_load_reg(a->vm, 1);
876 fn(tmp3, tmp3, tmp2);
877 tcg_temp_free_i32(tmp2);
878
879 neon_store_reg(a->vd, 0, tmp);
880 neon_store_reg(a->vd, 1, tmp3);
881 return true;
882 }
883
884 #define DO_3SAME_PAIR(INSN, func) \
885 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
886 { \
887 static NeonGenTwoOpFn * const fns[] = { \
888 gen_helper_neon_##func##8, \
889 gen_helper_neon_##func##16, \
890 gen_helper_neon_##func##32, \
891 }; \
892 if (a->size > 2) { \
893 return false; \
894 } \
895 return do_3same_pair(s, a, fns[a->size]); \
896 }
897
898 /* 32-bit pairwise ops end up the same as the elementwise versions. */
899 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
900 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
901 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
902 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
903 #define gen_helper_neon_padd_u32 tcg_gen_add_i32
904
905 DO_3SAME_PAIR(VPMAX_S, pmax_s)
906 DO_3SAME_PAIR(VPMIN_S, pmin_s)
907 DO_3SAME_PAIR(VPMAX_U, pmax_u)
908 DO_3SAME_PAIR(VPMIN_U, pmin_u)
909 DO_3SAME_PAIR(VPADD, padd_u)
910
911 #define DO_3SAME_VQDMULH(INSN, FUNC) \
912 WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##_s16); \
913 WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##_s32); \
914 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
915 uint32_t rn_ofs, uint32_t rm_ofs, \
916 uint32_t oprsz, uint32_t maxsz) \
917 { \
918 static const GVecGen3 ops[2] = { \
919 { .fni4 = gen_##INSN##_tramp16 }, \
920 { .fni4 = gen_##INSN##_tramp32 }, \
921 }; \
922 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece - 1]); \
923 } \
924 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
925 { \
926 if (a->size != 1 && a->size != 2) { \
927 return false; \
928 } \
929 return do_3same(s, a, gen_##INSN##_3s); \
930 }
931
932 DO_3SAME_VQDMULH(VQDMULH, qdmulh)
933 DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)
934
935 static bool do_3same_fp(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn,
936 bool reads_vd)
937 {
938 /*
939 * FP operations handled elementwise 32 bits at a time.
940 * If reads_vd is true then the old value of Vd will be
941 * loaded before calling the callback function. This is
942 * used for multiply-accumulate type operations.
943 */
944 TCGv_i32 tmp, tmp2;
945 int pass;
946
947 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
948 return false;
949 }
950
951 /* UNDEF accesses to D16-D31 if they don't exist. */
952 if (!dc_isar_feature(aa32_simd_r32, s) &&
953 ((a->vd | a->vn | a->vm) & 0x10)) {
954 return false;
955 }
956
957 if ((a->vn | a->vm | a->vd) & a->q) {
958 return false;
959 }
960
961 if (!vfp_access_check(s)) {
962 return true;
963 }
964
965 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
966 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
967 tmp = neon_load_reg(a->vn, pass);
968 tmp2 = neon_load_reg(a->vm, pass);
969 if (reads_vd) {
970 TCGv_i32 tmp_rd = neon_load_reg(a->vd, pass);
971 fn(tmp_rd, tmp, tmp2, fpstatus);
972 neon_store_reg(a->vd, pass, tmp_rd);
973 tcg_temp_free_i32(tmp);
974 } else {
975 fn(tmp, tmp, tmp2, fpstatus);
976 neon_store_reg(a->vd, pass, tmp);
977 }
978 tcg_temp_free_i32(tmp2);
979 }
980 tcg_temp_free_ptr(fpstatus);
981 return true;
982 }
983
984 /*
985 * For all the functions using this macro, size == 1 means fp16,
986 * which is an architecture extension we don't implement yet.
987 */
988 #define DO_3S_FP_GVEC(INSN,FUNC) \
989 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
990 uint32_t rn_ofs, uint32_t rm_ofs, \
991 uint32_t oprsz, uint32_t maxsz) \
992 { \
993 TCGv_ptr fpst = get_fpstatus_ptr(1); \
994 tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \
995 oprsz, maxsz, 0, FUNC); \
996 tcg_temp_free_ptr(fpst); \
997 } \
998 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
999 { \
1000 if (a->size != 0) { \
1001 /* TODO fp16 support */ \
1002 return false; \
1003 } \
1004 return do_3same(s, a, gen_##INSN##_3s); \
1005 }
1006
1007
1008 DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s)
1009 DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s)
1010 DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s)
1011 DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s)
1012
1013 /*
1014 * For all the functions using this macro, size == 1 means fp16,
1015 * which is an architecture extension we don't implement yet.
1016 */
1017 #define DO_3S_FP(INSN,FUNC,READS_VD) \
1018 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1019 { \
1020 if (a->size != 0) { \
1021 /* TODO fp16 support */ \
1022 return false; \
1023 } \
1024 return do_3same_fp(s, a, FUNC, READS_VD); \
1025 }
1026
1027 DO_3S_FP(VCEQ, gen_helper_neon_ceq_f32, false)
1028 DO_3S_FP(VCGE, gen_helper_neon_cge_f32, false)
1029 DO_3S_FP(VCGT, gen_helper_neon_cgt_f32, false)
1030 DO_3S_FP(VACGE, gen_helper_neon_acge_f32, false)
1031 DO_3S_FP(VACGT, gen_helper_neon_acgt_f32, false)
1032 DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
1033 DO_3S_FP(VMIN, gen_helper_vfp_mins, false)
1034
1035 static void gen_VMLA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1036 TCGv_ptr fpstatus)
1037 {
1038 gen_helper_vfp_muls(vn, vn, vm, fpstatus);
1039 gen_helper_vfp_adds(vd, vd, vn, fpstatus);
1040 }
1041
1042 static void gen_VMLS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1043 TCGv_ptr fpstatus)
1044 {
1045 gen_helper_vfp_muls(vn, vn, vm, fpstatus);
1046 gen_helper_vfp_subs(vd, vd, vn, fpstatus);
1047 }
1048
1049 DO_3S_FP(VMLA, gen_VMLA_fp_3s, true)
1050 DO_3S_FP(VMLS, gen_VMLS_fp_3s, true)
1051
1052 static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
1053 {
1054 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
1055 return false;
1056 }
1057
1058 if (a->size != 0) {
1059 /* TODO fp16 support */
1060 return false;
1061 }
1062
1063 return do_3same_fp(s, a, gen_helper_vfp_maxnums, false);
1064 }
1065
1066 static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
1067 {
1068 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
1069 return false;
1070 }
1071
1072 if (a->size != 0) {
1073 /* TODO fp16 support */
1074 return false;
1075 }
1076
1077 return do_3same_fp(s, a, gen_helper_vfp_minnums, false);
1078 }
1079
1080 WRAP_ENV_FN(gen_VRECPS_tramp, gen_helper_recps_f32)
1081
1082 static void gen_VRECPS_fp_3s(unsigned vece, uint32_t rd_ofs,
1083 uint32_t rn_ofs, uint32_t rm_ofs,
1084 uint32_t oprsz, uint32_t maxsz)
1085 {
1086 static const GVecGen3 ops = { .fni4 = gen_VRECPS_tramp };
1087 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
1088 }
1089
1090 static bool trans_VRECPS_fp_3s(DisasContext *s, arg_3same *a)
1091 {
1092 if (a->size != 0) {
1093 /* TODO fp16 support */
1094 return false;
1095 }
1096
1097 return do_3same(s, a, gen_VRECPS_fp_3s);
1098 }
1099
1100 WRAP_ENV_FN(gen_VRSQRTS_tramp, gen_helper_rsqrts_f32)
1101
1102 static void gen_VRSQRTS_fp_3s(unsigned vece, uint32_t rd_ofs,
1103 uint32_t rn_ofs, uint32_t rm_ofs,
1104 uint32_t oprsz, uint32_t maxsz)
1105 {
1106 static const GVecGen3 ops = { .fni4 = gen_VRSQRTS_tramp };
1107 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
1108 }
1109
1110 static bool trans_VRSQRTS_fp_3s(DisasContext *s, arg_3same *a)
1111 {
1112 if (a->size != 0) {
1113 /* TODO fp16 support */
1114 return false;
1115 }
1116
1117 return do_3same(s, a, gen_VRSQRTS_fp_3s);
1118 }
1119
1120 static void gen_VFMA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1121 TCGv_ptr fpstatus)
1122 {
1123 gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
1124 }
1125
1126 static bool trans_VFMA_fp_3s(DisasContext *s, arg_3same *a)
1127 {
1128 if (!dc_isar_feature(aa32_simdfmac, s)) {
1129 return false;
1130 }
1131
1132 if (a->size != 0) {
1133 /* TODO fp16 support */
1134 return false;
1135 }
1136
1137 return do_3same_fp(s, a, gen_VFMA_fp_3s, true);
1138 }
1139
1140 static void gen_VFMS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1141 TCGv_ptr fpstatus)
1142 {
1143 gen_helper_vfp_negs(vn, vn);
1144 gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
1145 }
1146
1147 static bool trans_VFMS_fp_3s(DisasContext *s, arg_3same *a)
1148 {
1149 if (!dc_isar_feature(aa32_simdfmac, s)) {
1150 return false;
1151 }
1152
1153 if (a->size != 0) {
1154 /* TODO fp16 support */
1155 return false;
1156 }
1157
1158 return do_3same_fp(s, a, gen_VFMS_fp_3s, true);
1159 }
1160
1161 static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
1162 {
1163 /* FP operations handled pairwise 32 bits at a time */
1164 TCGv_i32 tmp, tmp2, tmp3;
1165 TCGv_ptr fpstatus;
1166
1167 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1168 return false;
1169 }
1170
1171 /* UNDEF accesses to D16-D31 if they don't exist. */
1172 if (!dc_isar_feature(aa32_simd_r32, s) &&
1173 ((a->vd | a->vn | a->vm) & 0x10)) {
1174 return false;
1175 }
1176
1177 if (!vfp_access_check(s)) {
1178 return true;
1179 }
1180
1181 assert(a->q == 0); /* enforced by decode patterns */
1182
1183 /*
1184 * Note that we have to be careful not to clobber the source operands
1185 * in the "vm == vd" case by storing the result of the first pass too
1186 * early. Since Q is 0 there are always just two passes, so instead
1187 * of a complicated loop over each pass we just unroll.
1188 */
1189 fpstatus = get_fpstatus_ptr(1);
1190 tmp = neon_load_reg(a->vn, 0);
1191 tmp2 = neon_load_reg(a->vn, 1);
1192 fn(tmp, tmp, tmp2, fpstatus);
1193 tcg_temp_free_i32(tmp2);
1194
1195 tmp3 = neon_load_reg(a->vm, 0);
1196 tmp2 = neon_load_reg(a->vm, 1);
1197 fn(tmp3, tmp3, tmp2, fpstatus);
1198 tcg_temp_free_i32(tmp2);
1199 tcg_temp_free_ptr(fpstatus);
1200
1201 neon_store_reg(a->vd, 0, tmp);
1202 neon_store_reg(a->vd, 1, tmp3);
1203 return true;
1204 }
1205
1206 /*
1207 * For all the functions using this macro, size == 1 means fp16,
1208 * which is an architecture extension we don't implement yet.
1209 */
1210 #define DO_3S_FP_PAIR(INSN,FUNC) \
1211 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1212 { \
1213 if (a->size != 0) { \
1214 /* TODO fp16 support */ \
1215 return false; \
1216 } \
1217 return do_3same_fp_pair(s, a, FUNC); \
1218 }
1219
1220 DO_3S_FP_PAIR(VPADD, gen_helper_vfp_adds)
1221 DO_3S_FP_PAIR(VPMAX, gen_helper_vfp_maxs)
1222 DO_3S_FP_PAIR(VPMIN, gen_helper_vfp_mins)
1223
1224 static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
1225 {
1226 /* Handle a 2-reg-shift insn which can be vectorized. */
1227 int vec_size = a->q ? 16 : 8;
1228 int rd_ofs = neon_reg_offset(a->vd, 0);
1229 int rm_ofs = neon_reg_offset(a->vm, 0);
1230
1231 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1232 return false;
1233 }
1234
1235 /* UNDEF accesses to D16-D31 if they don't exist. */
1236 if (!dc_isar_feature(aa32_simd_r32, s) &&
1237 ((a->vd | a->vm) & 0x10)) {
1238 return false;
1239 }
1240
1241 if ((a->vm | a->vd) & a->q) {
1242 return false;
1243 }
1244
1245 if (!vfp_access_check(s)) {
1246 return true;
1247 }
1248
1249 fn(a->size, rd_ofs, rm_ofs, a->shift, vec_size, vec_size);
1250 return true;
1251 }
1252
1253 #define DO_2SH(INSN, FUNC) \
1254 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1255 { \
1256 return do_vector_2sh(s, a, FUNC); \
1257 } \
1258
1259 DO_2SH(VSHL, tcg_gen_gvec_shli)
1260 DO_2SH(VSLI, gen_gvec_sli)
1261 DO_2SH(VSRI, gen_gvec_sri)
1262 DO_2SH(VSRA_S, gen_gvec_ssra)
1263 DO_2SH(VSRA_U, gen_gvec_usra)
1264 DO_2SH(VRSHR_S, gen_gvec_srshr)
1265 DO_2SH(VRSHR_U, gen_gvec_urshr)
1266 DO_2SH(VRSRA_S, gen_gvec_srsra)
1267 DO_2SH(VRSRA_U, gen_gvec_ursra)
1268
1269 static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a)
1270 {
1271 /* Signed shift out of range results in all-sign-bits */
1272 a->shift = MIN(a->shift, (8 << a->size) - 1);
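/*
 * Illustrative example: for size == 0 an out-of-range encoded shift of
 * 8 is clamped to 7, and an arithmetic shift by 7 of an 8-bit element
 * yields the same all-sign-bits result.
 */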
1273 return do_vector_2sh(s, a, tcg_gen_gvec_sari);
1274 }
1275
1276 static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
1277 int64_t shift, uint32_t oprsz, uint32_t maxsz)
1278 {
1279 tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0);
1280 }
1281
1282 static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
1283 {
1284 /* Shift out of range is architecturally valid and results in zero. */
1285 if (a->shift >= (8 << a->size)) {
1286 return do_vector_2sh(s, a, gen_zero_rd_2sh);
1287 } else {
1288 return do_vector_2sh(s, a, tcg_gen_gvec_shri);
1289 }
1290 }
1291
1292 static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
1293 NeonGenTwo64OpEnvFn *fn)
1294 {
1295 /*
1296 * 2-reg-and-shift operations, size == 3 case, where the
1297 * function needs to be passed cpu_env.
1298 */
1299 TCGv_i64 constimm;
1300 int pass;
1301
1302 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1303 return false;
1304 }
1305
1306 /* UNDEF accesses to D16-D31 if they don't exist. */
1307 if (!dc_isar_feature(aa32_simd_r32, s) &&
1308 ((a->vd | a->vm) & 0x10)) {
1309 return false;
1310 }
1311
1312 if ((a->vm | a->vd) & a->q) {
1313 return false;
1314 }
1315
1316 if (!vfp_access_check(s)) {
1317 return true;
1318 }
1319
1320 /*
1321 * To avoid excessive duplication of ops we implement shift
1322 * by immediate using the variable shift operations.
1323 */
1324 constimm = tcg_const_i64(dup_const(a->size, a->shift));
1325
1326 for (pass = 0; pass < a->q + 1; pass++) {
1327 TCGv_i64 tmp = tcg_temp_new_i64();
1328
1329 neon_load_reg64(tmp, a->vm + pass);
1330 fn(tmp, cpu_env, tmp, constimm);
1331 neon_store_reg64(tmp, a->vd + pass);
1332 tcg_temp_free_i64(tmp);
1333 }
1334 tcg_temp_free_i64(constimm);
1335 return true;
1336 }
1337
1338 static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
1339 NeonGenTwoOpEnvFn *fn)
1340 {
1341 /*
1342 * 2-reg-and-shift operations, size < 3 case, where the
1343 * helper needs to be passed cpu_env.
1344 */
1345 TCGv_i32 constimm;
1346 int pass;
1347
1348 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1349 return false;
1350 }
1351
1352 /* UNDEF accesses to D16-D31 if they don't exist. */
1353 if (!dc_isar_feature(aa32_simd_r32, s) &&
1354 ((a->vd | a->vm) & 0x10)) {
1355 return false;
1356 }
1357
1358 if ((a->vm | a->vd) & a->q) {
1359 return false;
1360 }
1361
1362 if (!vfp_access_check(s)) {
1363 return true;
1364 }
1365
1366 /*
1367 * To avoid excessive duplication of ops we implement shift
1368 * by immediate using the variable shift operations.
1369 */
1370 constimm = tcg_const_i32(dup_const(a->size, a->shift));
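/*
 * Illustrative example: for a->size == 1 (16-bit elements) and
 * a->shift == 5 the constant is 0x00050005, i.e. each 16-bit lane
 * carries the same shift count for the variable-shift helper.
 */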
1371
1372 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
1373 TCGv_i32 tmp = neon_load_reg(a->vm, pass);
1374 fn(tmp, cpu_env, tmp, constimm);
1375 neon_store_reg(a->vd, pass, tmp);
1376 }
1377 tcg_temp_free_i32(constimm);
1378 return true;
1379 }
1380
1381 #define DO_2SHIFT_ENV(INSN, FUNC) \
1382 static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
1383 { \
1384 return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \
1385 } \
1386 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1387 { \
1388 static NeonGenTwoOpEnvFn * const fns[] = { \
1389 gen_helper_neon_##FUNC##8, \
1390 gen_helper_neon_##FUNC##16, \
1391 gen_helper_neon_##FUNC##32, \
1392 }; \
1393 assert(a->size < ARRAY_SIZE(fns)); \
1394 return do_2shift_env_32(s, a, fns[a->size]); \
1395 }
1396
1397 DO_2SHIFT_ENV(VQSHLU, qshlu_s)
1398 DO_2SHIFT_ENV(VQSHL_U, qshl_u)
1399 DO_2SHIFT_ENV(VQSHL_S, qshl_s)
1400
1401 static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
1402 NeonGenTwo64OpFn *shiftfn,
1403 NeonGenNarrowEnvFn *narrowfn)
1404 {
1405 /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
1406 TCGv_i64 constimm, rm1, rm2;
1407 TCGv_i32 rd;
1408
1409 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1410 return false;
1411 }
1412
1413 /* UNDEF accesses to D16-D31 if they don't exist. */
1414 if (!dc_isar_feature(aa32_simd_r32, s) &&
1415 ((a->vd | a->vm) & 0x10)) {
1416 return false;
1417 }
1418
1419 if (a->vm & 1) {
1420 return false;
1421 }
1422
1423 if (!vfp_access_check(s)) {
1424 return true;
1425 }
1426
1427 /*
1428 * This is always a right shift, and the shiftfn is always a
1429 * left-shift helper, which thus needs the negated shift count.
1430 */
1431 constimm = tcg_const_i64(-a->shift);
1432 rm1 = tcg_temp_new_i64();
1433 rm2 = tcg_temp_new_i64();
1434
1435 /* Load both inputs first to avoid potential overwrite if rm == rd */
1436 neon_load_reg64(rm1, a->vm);
1437 neon_load_reg64(rm2, a->vm + 1);
1438
1439 shiftfn(rm1, rm1, constimm);
1440 rd = tcg_temp_new_i32();
1441 narrowfn(rd, cpu_env, rm1);
1442 neon_store_reg(a->vd, 0, rd);
1443
1444 shiftfn(rm2, rm2, constimm);
1445 rd = tcg_temp_new_i32();
1446 narrowfn(rd, cpu_env, rm2);
1447 neon_store_reg(a->vd, 1, rd);
1448
1449 tcg_temp_free_i64(rm1);
1450 tcg_temp_free_i64(rm2);
1451 tcg_temp_free_i64(constimm);
1452
1453 return true;
1454 }
1455
1456 static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
1457 NeonGenTwoOpFn *shiftfn,
1458 NeonGenNarrowEnvFn *narrowfn)
1459 {
1460 /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
1461 TCGv_i32 constimm, rm1, rm2, rm3, rm4;
1462 TCGv_i64 rtmp;
1463 uint32_t imm;
1464
1465 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1466 return false;
1467 }
1468
1469 /* UNDEF accesses to D16-D31 if they don't exist. */
1470 if (!dc_isar_feature(aa32_simd_r32, s) &&
1471 ((a->vd | a->vm) & 0x10)) {
1472 return false;
1473 }
1474
1475 if (a->vm & 1) {
1476 return false;
1477 }
1478
1479 if (!vfp_access_check(s)) {
1480 return true;
1481 }
1482
1483 /*
1484 * This is always a right shift, and the shiftfn is always a
1485 * left-shift helper, which thus needs the negated shift count
1486 * duplicated into each lane of the immediate value.
1487 */
1488 if (a->size == 1) {
1489 imm = (uint16_t)(-a->shift);
1490 imm |= imm << 16;
1491 } else {
1492 /* size == 2 */
1493 imm = -a->shift;
1494 }
1495 constimm = tcg_const_i32(imm);
1496
1497 /* Load all inputs first to avoid potential overwrite */
1498 rm1 = neon_load_reg(a->vm, 0);
1499 rm2 = neon_load_reg(a->vm, 1);
1500 rm3 = neon_load_reg(a->vm + 1, 0);
1501 rm4 = neon_load_reg(a->vm + 1, 1);
1502 rtmp = tcg_temp_new_i64();
1503
1504 shiftfn(rm1, rm1, constimm);
1505 shiftfn(rm2, rm2, constimm);
1506
1507 tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
1508 tcg_temp_free_i32(rm2);
1509
1510 narrowfn(rm1, cpu_env, rtmp);
1511 neon_store_reg(a->vd, 0, rm1);
1512
1513 shiftfn(rm3, rm3, constimm);
1514 shiftfn(rm4, rm4, constimm);
1515 tcg_temp_free_i32(constimm);
1516
1517 tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
1518 tcg_temp_free_i32(rm4);
1519
1520 narrowfn(rm3, cpu_env, rtmp);
1521 tcg_temp_free_i64(rtmp);
1522 neon_store_reg(a->vd, 1, rm3);
1523 return true;
1524 }
1525
1526 #define DO_2SN_64(INSN, FUNC, NARROWFUNC) \
1527 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1528 { \
1529 return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC); \
1530 }
1531 #define DO_2SN_32(INSN, FUNC, NARROWFUNC) \
1532 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1533 { \
1534 return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \
1535 }
1536
1537 static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1538 {
1539 tcg_gen_extrl_i64_i32(dest, src);
1540 }
1541
1542 static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1543 {
1544 gen_helper_neon_narrow_u16(dest, src);
1545 }
1546
1547 static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1548 {
1549 gen_helper_neon_narrow_u8(dest, src);
1550 }
1551
1552 DO_2SN_64(VSHRN_64, gen_ushl_i64, gen_neon_narrow_u32)
1553 DO_2SN_32(VSHRN_32, gen_ushl_i32, gen_neon_narrow_u16)
1554 DO_2SN_32(VSHRN_16, gen_helper_neon_shl_u16, gen_neon_narrow_u8)
1555
1556 DO_2SN_64(VRSHRN_64, gen_helper_neon_rshl_u64, gen_neon_narrow_u32)
1557 DO_2SN_32(VRSHRN_32, gen_helper_neon_rshl_u32, gen_neon_narrow_u16)
1558 DO_2SN_32(VRSHRN_16, gen_helper_neon_rshl_u16, gen_neon_narrow_u8)
1559
1560 DO_2SN_64(VQSHRUN_64, gen_sshl_i64, gen_helper_neon_unarrow_sat32)
1561 DO_2SN_32(VQSHRUN_32, gen_sshl_i32, gen_helper_neon_unarrow_sat16)
1562 DO_2SN_32(VQSHRUN_16, gen_helper_neon_shl_s16, gen_helper_neon_unarrow_sat8)
1563
1564 DO_2SN_64(VQRSHRUN_64, gen_helper_neon_rshl_s64, gen_helper_neon_unarrow_sat32)
1565 DO_2SN_32(VQRSHRUN_32, gen_helper_neon_rshl_s32, gen_helper_neon_unarrow_sat16)
1566 DO_2SN_32(VQRSHRUN_16, gen_helper_neon_rshl_s16, gen_helper_neon_unarrow_sat8)
1567 DO_2SN_64(VQSHRN_S64, gen_sshl_i64, gen_helper_neon_narrow_sat_s32)
1568 DO_2SN_32(VQSHRN_S32, gen_sshl_i32, gen_helper_neon_narrow_sat_s16)
1569 DO_2SN_32(VQSHRN_S16, gen_helper_neon_shl_s16, gen_helper_neon_narrow_sat_s8)
1570
1571 DO_2SN_64(VQRSHRN_S64, gen_helper_neon_rshl_s64, gen_helper_neon_narrow_sat_s32)
1572 DO_2SN_32(VQRSHRN_S32, gen_helper_neon_rshl_s32, gen_helper_neon_narrow_sat_s16)
1573 DO_2SN_32(VQRSHRN_S16, gen_helper_neon_rshl_s16, gen_helper_neon_narrow_sat_s8)
1574
1575 DO_2SN_64(VQSHRN_U64, gen_ushl_i64, gen_helper_neon_narrow_sat_u32)
1576 DO_2SN_32(VQSHRN_U32, gen_ushl_i32, gen_helper_neon_narrow_sat_u16)
1577 DO_2SN_32(VQSHRN_U16, gen_helper_neon_shl_u16, gen_helper_neon_narrow_sat_u8)
1578
1579 DO_2SN_64(VQRSHRN_U64, gen_helper_neon_rshl_u64, gen_helper_neon_narrow_sat_u32)
1580 DO_2SN_32(VQRSHRN_U32, gen_helper_neon_rshl_u32, gen_helper_neon_narrow_sat_u16)
1581 DO_2SN_32(VQRSHRN_U16, gen_helper_neon_rshl_u16, gen_helper_neon_narrow_sat_u8)
1582
1583 static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
1584 NeonGenWidenFn *widenfn, bool u)
1585 {
1586 TCGv_i64 tmp;
1587 TCGv_i32 rm0, rm1;
1588 uint64_t widen_mask = 0;
1589
1590 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1591 return false;
1592 }
1593
1594 /* UNDEF accesses to D16-D31 if they don't exist. */
1595 if (!dc_isar_feature(aa32_simd_r32, s) &&
1596 ((a->vd | a->vm) & 0x10)) {
1597 return false;
1598 }
1599
1600 if (a->vd & 1) {
1601 return false;
1602 }
1603
1604 if (!vfp_access_check(s)) {
1605 return true;
1606 }
1607
1608 /*
1609 * This is a widen-and-shift operation. The shift is always less
1610 * than the width of the source type, so after widening the input
1611 * vector we can simply shift the whole 64-bit widened register,
1612 * and then clear the potential overflow bits resulting from left
1613 * bits of the narrow input appearing as right bits of the left
1614 * neighbour narrow input. Calculate a mask of bits to clear.
1615 */
1616 if ((a->shift != 0) && (a->size < 2 || u)) {
1617 int esize = 8 << a->size;
1618 widen_mask = MAKE_64BIT_MASK(0, esize);
1619 widen_mask >>= esize - a->shift;
1620 widen_mask = dup_const(a->size + 1, widen_mask);
1621 }
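/*
 * Illustrative example: for size == 0 (8-bit elements) and shift == 3,
 * widen_mask is dup_const(MO_16, 0xff >> 5) == 0x0007000700070007, so
 * the andi below clears the low three bits of each 16-bit lane, which
 * is where the 64-bit left shift deposits bits from the lane below.
 */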
1622
1623 rm0 = neon_load_reg(a->vm, 0);
1624 rm1 = neon_load_reg(a->vm, 1);
1625 tmp = tcg_temp_new_i64();
1626
1627 widenfn(tmp, rm0);
1628 tcg_temp_free_i32(rm0);
1629 if (a->shift != 0) {
1630 tcg_gen_shli_i64(tmp, tmp, a->shift);
1631 tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
1632 }
1633 neon_store_reg64(tmp, a->vd);
1634
1635 widenfn(tmp, rm1);
1636 tcg_temp_free_i32(rm1);
1637 if (a->shift != 0) {
1638 tcg_gen_shli_i64(tmp, tmp, a->shift);
1639 tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
1640 }
1641 neon_store_reg64(tmp, a->vd + 1);
1642 tcg_temp_free_i64(tmp);
1643 return true;
1644 }
1645
1646 static bool trans_VSHLL_S_2sh(DisasContext *s, arg_2reg_shift *a)
1647 {
1648 static NeonGenWidenFn * const widenfn[] = {
1649 gen_helper_neon_widen_s8,
1650 gen_helper_neon_widen_s16,
1651 tcg_gen_ext_i32_i64,
1652 };
1653 return do_vshll_2sh(s, a, widenfn[a->size], false);
1654 }
1655
1656 static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
1657 {
1658 static NeonGenWidenFn * const widenfn[] = {
1659 gen_helper_neon_widen_u8,
1660 gen_helper_neon_widen_u16,
1661 tcg_gen_extu_i32_i64,
1662 };
1663 return do_vshll_2sh(s, a, widenfn[a->size], true);
1664 }
1665
1666 static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
1667 NeonGenTwoSingleOpFn *fn)
1668 {
1669 /* FP operations in 2-reg-and-shift group */
1670 TCGv_i32 tmp, shiftv;
1671 TCGv_ptr fpstatus;
1672 int pass;
1673
1674 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1675 return false;
1676 }
1677
1678 /* UNDEF accesses to D16-D31 if they don't exist. */
1679 if (!dc_isar_feature(aa32_simd_r32, s) &&
1680 ((a->vd | a->vm) & 0x10)) {
1681 return false;
1682 }
1683
1684 if ((a->vm | a->vd) & a->q) {
1685 return false;
1686 }
1687
1688 if (!vfp_access_check(s)) {
1689 return true;
1690 }
1691
1692 fpstatus = get_fpstatus_ptr(1);
1693 shiftv = tcg_const_i32(a->shift);
1694 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
1695 tmp = neon_load_reg(a->vm, pass);
1696 fn(tmp, tmp, shiftv, fpstatus);
1697 neon_store_reg(a->vd, pass, tmp);
1698 }
1699 tcg_temp_free_ptr(fpstatus);
1700 tcg_temp_free_i32(shiftv);
1701 return true;
1702 }
1703
1704 #define DO_FP_2SH(INSN, FUNC) \
1705 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1706 { \
1707 return do_fp_2sh(s, a, FUNC); \
1708 }
1709
1710 DO_FP_2SH(VCVT_SF, gen_helper_vfp_sltos)
1711 DO_FP_2SH(VCVT_UF, gen_helper_vfp_ultos)
1712 DO_FP_2SH(VCVT_FS, gen_helper_vfp_tosls_round_to_zero)
1713 DO_FP_2SH(VCVT_FU, gen_helper_vfp_touls_round_to_zero)
1714
1715 static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
1716 {
1717 /*
1718 * Expand the encoded constant.
1719 * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
1720 * We choose to not special-case this and will behave as if a
1721 * valid constant encoding of 0 had been given.
1722 * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
1723 */
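/*
 * Illustrative example: cmode == 13, op == 0, imm == 0xab expands to
 * (0xab << 16) | 0xffff == 0x00abffff, which the final dup_const()
 * replicates to 0x00abffff00abffff.
 */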
1724 switch (cmode) {
1725 case 0: case 1:
1726 /* no-op */
1727 break;
1728 case 2: case 3:
1729 imm <<= 8;
1730 break;
1731 case 4: case 5:
1732 imm <<= 16;
1733 break;
1734 case 6: case 7:
1735 imm <<= 24;
1736 break;
1737 case 8: case 9:
1738 imm |= imm << 16;
1739 break;
1740 case 10: case 11:
1741 imm = (imm << 8) | (imm << 24);
1742 break;
1743 case 12:
1744 imm = (imm << 8) | 0xff;
1745 break;
1746 case 13:
1747 imm = (imm << 16) | 0xffff;
1748 break;
1749 case 14:
1750 if (op) {
1751 /*
1752 * This is the only case where the top and bottom 32 bits
1753 * of the encoded constant differ.
1754 */
1755 uint64_t imm64 = 0;
1756 int n;
1757
1758 for (n = 0; n < 8; n++) {
1759 if (imm & (1 << n)) {
1760 imm64 |= (0xffULL << (n * 8));
1761 }
1762 }
1763 return imm64;
1764 }
1765 imm |= (imm << 8) | (imm << 16) | (imm << 24);
1766 break;
1767 case 15:
1768 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
1769 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
1770 break;
1771 }
1772 if (op) {
1773 imm = ~imm;
1774 }
1775 return dup_const(MO_32, imm);
1776 }
1777
1778 static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
1779 GVecGen2iFn *fn)
1780 {
1781 uint64_t imm;
1782 int reg_ofs, vec_size;
1783
1784 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1785 return false;
1786 }
1787
1788 /* UNDEF accesses to D16-D31 if they don't exist. */
1789 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
1790 return false;
1791 }
1792
1793 if (a->vd & a->q) {
1794 return false;
1795 }
1796
1797 if (!vfp_access_check(s)) {
1798 return true;
1799 }
1800
1801 reg_ofs = neon_reg_offset(a->vd, 0);
1802 vec_size = a->q ? 16 : 8;
1803 imm = asimd_imm_const(a->imm, a->cmode, a->op);
1804
1805 fn(MO_64, reg_ofs, reg_ofs, imm, vec_size, vec_size);
1806 return true;
1807 }
1808
1809 static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
1810 int64_t c, uint32_t oprsz, uint32_t maxsz)
1811 {
1812 tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
1813 }
1814
1815 static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
1816 {
1817 /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
1818 GVecGen2iFn *fn;
1819
1820 if ((a->cmode & 1) && a->cmode < 12) {
1821 /* for op=1, the imm will be inverted, so BIC becomes AND. */
1822 fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
1823 } else {
1824 /* There is one unallocated cmode/op combination in this space */
1825 if (a->cmode == 15 && a->op == 1) {
1826 return false;
1827 }
1828 fn = gen_VMOV_1r;
1829 }
1830 return do_1reg_imm(s, a, fn);
1831 }
1832
1833 static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
1834 NeonGenWidenFn *widenfn,
1835 NeonGenTwo64OpFn *opfn,
1836 bool src1_wide)
1837 {
1838 /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
1839 TCGv_i64 rn0_64, rn1_64, rm_64;
1840 TCGv_i32 rm;
1841
1842 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1843 return false;
1844 }
1845
1846 /* UNDEF accesses to D16-D31 if they don't exist. */
1847 if (!dc_isar_feature(aa32_simd_r32, s) &&
1848 ((a->vd | a->vn | a->vm) & 0x10)) {
1849 return false;
1850 }
1851
1852 if (!widenfn || !opfn) {
1853 /* size == 3 case, which is an entirely different insn group */
1854 return false;
1855 }
1856
1857 if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
1858 return false;
1859 }
1860
1861 if (!vfp_access_check(s)) {
1862 return true;
1863 }
1864
1865 rn0_64 = tcg_temp_new_i64();
1866 rn1_64 = tcg_temp_new_i64();
1867 rm_64 = tcg_temp_new_i64();
1868
1869 if (src1_wide) {
1870 neon_load_reg64(rn0_64, a->vn);
1871 } else {
1872 TCGv_i32 tmp = neon_load_reg(a->vn, 0);
1873 widenfn(rn0_64, tmp);
1874 tcg_temp_free_i32(tmp);
1875 }
1876 rm = neon_load_reg(a->vm, 0);
1877
1878 widenfn(rm_64, rm);
1879 tcg_temp_free_i32(rm);
1880 opfn(rn0_64, rn0_64, rm_64);
1881
1882 /*
1883 * Load second pass inputs before storing the first pass result, to
1884 * avoid incorrect results if a narrow input overlaps with the result.
1885 */
1886 if (src1_wide) {
1887 neon_load_reg64(rn1_64, a->vn + 1);
1888 } else {
1889 TCGv_i32 tmp = neon_load_reg(a->vn, 1);
1890 widenfn(rn1_64, tmp);
1891 tcg_temp_free_i32(tmp);
1892 }
1893 rm = neon_load_reg(a->vm, 1);
1894
1895 neon_store_reg64(rn0_64, a->vd);
1896
1897 widenfn(rm_64, rm);
1898 tcg_temp_free_i32(rm);
1899 opfn(rn1_64, rn1_64, rm_64);
1900 neon_store_reg64(rn1_64, a->vd + 1);
1901
1902 tcg_temp_free_i64(rn0_64);
1903 tcg_temp_free_i64(rn1_64);
1904 tcg_temp_free_i64(rm_64);
1905
1906 return true;
1907 }
1908
1909 #define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \
1910 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1911 { \
1912 static NeonGenWidenFn * const widenfn[] = { \
1913 gen_helper_neon_widen_##S##8, \
1914 gen_helper_neon_widen_##S##16, \
1915 tcg_gen_##EXT##_i32_i64, \
1916 NULL, \
1917 }; \
1918 static NeonGenTwo64OpFn * const addfn[] = { \
1919 gen_helper_neon_##OP##l_u16, \
1920 gen_helper_neon_##OP##l_u32, \
1921 tcg_gen_##OP##_i64, \
1922 NULL, \
1923 }; \
1924 return do_prewiden_3d(s, a, widenfn[a->size], \
1925 addfn[a->size], SRC1WIDE); \
1926 }
1927
1928 DO_PREWIDEN(VADDL_S, s, ext, add, false)
1929 DO_PREWIDEN(VADDL_U, u, extu, add, false)
1930 DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
1931 DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
1932 DO_PREWIDEN(VADDW_S, s, ext, add, true)
1933 DO_PREWIDEN(VADDW_U, u, extu, add, true)
1934 DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
1935 DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
1936
1937 static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
1938 NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
1939 {
1940 /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
1941 TCGv_i64 rn_64, rm_64;
1942 TCGv_i32 rd0, rd1;
1943
1944 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1945 return false;
1946 }
1947
1948 /* UNDEF accesses to D16-D31 if they don't exist. */
1949 if (!dc_isar_feature(aa32_simd_r32, s) &&
1950 ((a->vd | a->vn | a->vm) & 0x10)) {
1951 return false;
1952 }
1953
1954 if (!opfn || !narrowfn) {
1955 /* size == 3 case, which is an entirely different insn group */
1956 return false;
1957 }
1958
1959 if ((a->vn | a->vm) & 1) {
1960 return false;
1961 }
1962
1963 if (!vfp_access_check(s)) {
1964 return true;
1965 }
1966
1967 rn_64 = tcg_temp_new_i64();
1968 rm_64 = tcg_temp_new_i64();
1969 rd0 = tcg_temp_new_i32();
1970 rd1 = tcg_temp_new_i32();
1971
1972 neon_load_reg64(rn_64, a->vn);
1973 neon_load_reg64(rm_64, a->vm);
1974
1975 opfn(rn_64, rn_64, rm_64);
1976
1977 narrowfn(rd0, rn_64);
1978
1979 neon_load_reg64(rn_64, a->vn + 1);
1980 neon_load_reg64(rm_64, a->vm + 1);
1981
1982 opfn(rn_64, rn_64, rm_64);
1983
1984 narrowfn(rd1, rn_64);
1985
1986 neon_store_reg(a->vd, 0, rd0);
1987 neon_store_reg(a->vd, 1, rd1);
1988
1989 tcg_temp_free_i64(rn_64);
1990 tcg_temp_free_i64(rm_64);
1991
1992 return true;
1993 }
1994
1995 #define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP) \
1996 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1997 { \
1998 static NeonGenTwo64OpFn * const addfn[] = { \
1999 gen_helper_neon_##OP##l_u16, \
2000 gen_helper_neon_##OP##l_u32, \
2001 tcg_gen_##OP##_i64, \
2002 NULL, \
2003 }; \
2004 static NeonGenNarrowFn * const narrowfn[] = { \
2005 gen_helper_neon_##NARROWTYPE##_high_u8, \
2006 gen_helper_neon_##NARROWTYPE##_high_u16, \
2007 EXTOP, \
2008 NULL, \
2009 }; \
2010 return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]); \
2011 }
2012
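/*
 * Narrow to the high half with rounding: adding 1 << 31 before taking
 * the high 32 bits rounds the discarded low half (half rounds up).
 */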
2013 static void gen_narrow_round_high_u32(TCGv_i32 rd, TCGv_i64 rn)
2014 {
2015 tcg_gen_addi_i64(rn, rn, 1u << 31);
2016 tcg_gen_extrh_i64_i32(rd, rn);
2017 }
2018
2019 DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
2020 DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
2021 DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
2022 DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
2023
2024 static bool do_long_3d(DisasContext *s, arg_3diff *a,
2025 NeonGenTwoOpWidenFn *opfn,
2026 NeonGenTwo64OpFn *accfn)
2027 {
2028 /*
2029 * 3-regs different lengths, long operations.
2030 * These perform an operation on two inputs that returns a double-width
2031 * result, and then possibly perform an accumulation operation of
2032 * that result into the double-width destination.
2033 */
2034 TCGv_i64 rd0, rd1, tmp;
2035 TCGv_i32 rn, rm;
2036
2037 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2038 return false;
2039 }
2040
2041 /* UNDEF accesses to D16-D31 if they don't exist. */
2042 if (!dc_isar_feature(aa32_simd_r32, s) &&
2043 ((a->vd | a->vn | a->vm) & 0x10)) {
2044 return false;
2045 }
2046
2047 if (!opfn) {
2048 /* size == 3 case, which is an entirely different insn group */
2049 return false;
2050 }
2051
2052 if (a->vd & 1) {
2053 return false;
2054 }
2055
2056 if (!vfp_access_check(s)) {
2057 return true;
2058 }
2059
2060 rd0 = tcg_temp_new_i64();
2061 rd1 = tcg_temp_new_i64();
2062
2063 rn = neon_load_reg(a->vn, 0);
2064 rm = neon_load_reg(a->vm, 0);
2065 opfn(rd0, rn, rm);
2066 tcg_temp_free_i32(rn);
2067 tcg_temp_free_i32(rm);
2068
2069 rn = neon_load_reg(a->vn, 1);
2070 rm = neon_load_reg(a->vm, 1);
2071 opfn(rd1, rn, rm);
2072 tcg_temp_free_i32(rn);
2073 tcg_temp_free_i32(rm);
2074
2075 /* Don't store results until after all loads: they might overlap */
2076 if (accfn) {
2077 tmp = tcg_temp_new_i64();
2078 neon_load_reg64(tmp, a->vd);
2079 accfn(tmp, tmp, rd0);
2080 neon_store_reg64(tmp, a->vd);
2081 neon_load_reg64(tmp, a->vd + 1);
2082 accfn(tmp, tmp, rd1);
2083 neon_store_reg64(tmp, a->vd + 1);
2084 tcg_temp_free_i64(tmp);
2085 } else {
2086 neon_store_reg64(rd0, a->vd);
2087 neon_store_reg64(rd1, a->vd + 1);
2088 }
2089
2090 tcg_temp_free_i64(rd0);
2091 tcg_temp_free_i64(rd1);
2092
2093 return true;
2094 }
2095
2096 static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
2097 {
2098 static NeonGenTwoOpWidenFn * const opfn[] = {
2099 gen_helper_neon_abdl_s16,
2100 gen_helper_neon_abdl_s32,
2101 gen_helper_neon_abdl_s64,
2102 NULL,
2103 };
2104
2105 return do_long_3d(s, a, opfn[a->size], NULL);
2106 }
2107
2108 static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
2109 {
2110 static NeonGenTwoOpWidenFn * const opfn[] = {
2111 gen_helper_neon_abdl_u16,
2112 gen_helper_neon_abdl_u32,
2113 gen_helper_neon_abdl_u64,
2114 NULL,
2115 };
2116
2117 return do_long_3d(s, a, opfn[a->size], NULL);
2118 }
2119
2120 static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
2121 {
2122 static NeonGenTwoOpWidenFn * const opfn[] = {
2123 gen_helper_neon_abdl_s16,
2124 gen_helper_neon_abdl_s32,
2125 gen_helper_neon_abdl_s64,
2126 NULL,
2127 };
2128 static NeonGenTwo64OpFn * const addfn[] = {
2129 gen_helper_neon_addl_u16,
2130 gen_helper_neon_addl_u32,
2131 tcg_gen_add_i64,
2132 NULL,
2133 };
2134
2135 return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
2136 }
2137
2138 static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
2139 {
2140 static NeonGenTwoOpWidenFn * const opfn[] = {
2141 gen_helper_neon_abdl_u16,
2142 gen_helper_neon_abdl_u32,
2143 gen_helper_neon_abdl_u64,
2144 NULL,
2145 };
2146 static NeonGenTwo64OpFn * const addfn[] = {
2147 gen_helper_neon_addl_u16,
2148 gen_helper_neon_addl_u32,
2149 tcg_gen_add_i64,
2150 NULL,
2151 };
2152
2153 return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
2154 }
2155
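/*
 * 32x32->64 widening multiplies, built from TCG's two-output multiply
 * plus a concat; the 8- and 16-bit cases use the Neon mull helpers.
 */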
2156 static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2157 {
2158 TCGv_i32 lo = tcg_temp_new_i32();
2159 TCGv_i32 hi = tcg_temp_new_i32();
2160
2161 tcg_gen_muls2_i32(lo, hi, rn, rm);
2162 tcg_gen_concat_i32_i64(rd, lo, hi);
2163
2164 tcg_temp_free_i32(lo);
2165 tcg_temp_free_i32(hi);
2166 }
2167
2168 static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2169 {
2170 TCGv_i32 lo = tcg_temp_new_i32();
2171 TCGv_i32 hi = tcg_temp_new_i32();
2172
2173 tcg_gen_mulu2_i32(lo, hi, rn, rm);
2174 tcg_gen_concat_i32_i64(rd, lo, hi);
2175
2176 tcg_temp_free_i32(lo);
2177 tcg_temp_free_i32(hi);
2178 }
2179
2180 static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a)
2181 {
2182 static NeonGenTwoOpWidenFn * const opfn[] = {
2183 gen_helper_neon_mull_s8,
2184 gen_helper_neon_mull_s16,
2185 gen_mull_s32,
2186 NULL,
2187 };
2188
2189 return do_long_3d(s, a, opfn[a->size], NULL);
2190 }
2191
2192 static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
2193 {
2194 static NeonGenTwoOpWidenFn * const opfn[] = {
2195 gen_helper_neon_mull_u8,
2196 gen_helper_neon_mull_u16,
2197 gen_mull_u32,
2198 NULL,
2199 };
2200
2201 return do_long_3d(s, a, opfn[a->size], NULL);
2202 }
2203
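/*
 * DO_VMLAL instantiates VMLAL/VMLSL: a widening multiply (signed or
 * unsigned) followed by an add or sub accumulation into the
 * double-width destination, both selected by element size.
 */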
2204 #define DO_VMLAL(INSN, MULL, ACC) \
2205 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
2206 { \
2207 static NeonGenTwoOpWidenFn * const opfn[] = { \
2208 gen_helper_neon_##MULL##8, \
2209 gen_helper_neon_##MULL##16, \
2210 gen_##MULL##32, \
2211 NULL, \
2212 }; \
2213 static NeonGenTwo64OpFn * const accfn[] = { \
2214 gen_helper_neon_##ACC##l_u16, \
2215 gen_helper_neon_##ACC##l_u32, \
2216 tcg_gen_##ACC##_i64, \
2217 NULL, \
2218 }; \
2219 return do_long_3d(s, a, opfn[a->size], accfn[a->size]); \
2220 }
2221
2222 DO_VMLAL(VMLAL_S, mull_s, add)
2223 DO_VMLAL(VMLAL_U, mull_u, add)
2224 DO_VMLAL(VMLSL_S, mull_s, sub)
2225 DO_VMLAL(VMLSL_U, mull_u, sub)
2226
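/*
 * VQDMULL: widening signed multiply, then double the product with
 * saturation. The doubling is a saturating add of the product to
 * itself, which also lets the helper set QC on saturation.
 */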
2227 static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2228 {
2229 gen_helper_neon_mull_s16(rd, rn, rm);
2230 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
2231 }
2232
2233 static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2234 {
2235 gen_mull_s32(rd, rn, rm);
2236 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
2237 }
2238
2239 static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
2240 {
2241 static NeonGenTwoOpWidenFn * const opfn[] = {
2242 NULL,
2243 gen_VQDMULL_16,
2244 gen_VQDMULL_32,
2245 NULL,
2246 };
2247
2248 return do_long_3d(s, a, opfn[a->size], NULL);
2249 }
2250
2251 static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2252 {
2253 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
2254 }
2255
2256 static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2257 {
2258 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
2259 }
2260
2261 static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
2262 {
2263 static NeonGenTwoOpWidenFn * const opfn[] = {
2264 NULL,
2265 gen_VQDMULL_16,
2266 gen_VQDMULL_32,
2267 NULL,
2268 };
2269 static NeonGenTwo64OpFn * const accfn[] = {
2270 NULL,
2271 gen_VQDMLAL_acc_16,
2272 gen_VQDMLAL_acc_32,
2273 NULL,
2274 };
2275
2276 return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
2277 }
2278
2279 static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2280 {
2281 gen_helper_neon_negl_u32(rm, rm);
2282 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
2283 }
2284
2285 static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2286 {
2287 tcg_gen_neg_i64(rm, rm);
2288 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
2289 }
2290
2291 static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
2292 {
2293 static NeonGenTwoOpWidenFn * const opfn[] = {
2294 NULL,
2295 gen_VQDMULL_16,
2296 gen_VQDMULL_32,
2297 NULL,
2298 };
2299 static NeonGenTwo64OpFn * const accfn[] = {
2300 NULL,
2301 gen_VQDMLSL_acc_16,
2302 gen_VQDMLSL_acc_32,
2303 NULL,
2304 };
2305
2306 return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
2307 }
2308
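/*
 * VMULL.P8 / VMULL.P64: polynomial (carry-less) multiply long. Both
 * sizes expand the whole Q-register result with a single gvec helper;
 * the 64-bit form additionally requires the aa32_pmull feature.
 */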
2309 static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
2310 {
2311 gen_helper_gvec_3 *fn_gvec;
2312
2313 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2314 return false;
2315 }
2316
2317 /* UNDEF accesses to D16-D31 if they don't exist. */
2318 if (!dc_isar_feature(aa32_simd_r32, s) &&
2319 ((a->vd | a->vn | a->vm) & 0x10)) {
2320 return false;
2321 }
2322
2323 if (a->vd & 1) {
2324 return false;
2325 }
2326
2327 switch (a->size) {
2328 case 0:
2329 fn_gvec = gen_helper_neon_pmull_h;
2330 break;
2331 case 2:
2332 if (!dc_isar_feature(aa32_pmull, s)) {
2333 return false;
2334 }
2335 fn_gvec = gen_helper_gvec_pmull_q;
2336 break;
2337 default:
2338 return false;
2339 }
2340
2341 if (!vfp_access_check(s)) {
2342 return true;
2343 }
2344
2345 tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
2346 neon_reg_offset(a->vn, 0),
2347 neon_reg_offset(a->vm, 0),
2348 16, 16, 0, fn_gvec);
2349 return true;
2350 }
2351
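/*
 * Scalar handling for the "two registers and a scalar" forms: the dup
 * helpers replicate one 16-bit half of a word into both halves, so
 * neon_get_scalar() returns a value with a 16-bit scalar visible in
 * both lanes that the 16-bit helpers operate on; a 32-bit scalar is
 * simply the selected word.
 */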
2352 static void gen_neon_dup_low16(TCGv_i32 var)
2353 {
2354 TCGv_i32 tmp = tcg_temp_new_i32();
2355 tcg_gen_ext16u_i32(var, var);
2356 tcg_gen_shli_i32(tmp, var, 16);
2357 tcg_gen_or_i32(var, var, tmp);
2358 tcg_temp_free_i32(tmp);
2359 }
2360
2361 static void gen_neon_dup_high16(TCGv_i32 var)
2362 {
2363 TCGv_i32 tmp = tcg_temp_new_i32();
2364 tcg_gen_andi_i32(var, var, 0xffff0000);
2365 tcg_gen_shri_i32(tmp, var, 16);
2366 tcg_gen_or_i32(var, var, tmp);
2367 tcg_temp_free_i32(tmp);
2368 }
2369
2370 static inline TCGv_i32 neon_get_scalar(int size, int reg)
2371 {
2372 TCGv_i32 tmp;
2373 if (size == 1) {
2374 tmp = neon_load_reg(reg & 7, reg >> 4);
2375 if (reg & 8) {
2376 gen_neon_dup_high16(tmp);
2377 } else {
2378 gen_neon_dup_low16(tmp);
2379 }
2380 } else {
2381 tmp = neon_load_reg(reg & 15, reg >> 4);
2382 }
2383 return tmp;
2384 }
2385
2386 static bool do_2scalar(DisasContext *s, arg_2scalar *a,
2387 NeonGenTwoOpFn *opfn, NeonGenTwoOpFn *accfn)
2388 {
2389 /*
2390 * Two registers and a scalar: perform an operation between
2391 * the input elements and the scalar, and then possibly
2392 * perform an accumulation operation of that result into the
2393 * destination.
2394 */
2395 TCGv_i32 scalar;
2396 int pass;
2397
2398 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2399 return false;
2400 }
2401
2402 /* UNDEF accesses to D16-D31 if they don't exist. */
2403 if (!dc_isar_feature(aa32_simd_r32, s) &&
2404 ((a->vd | a->vn | a->vm) & 0x10)) {
2405 return false;
2406 }
2407
2408 if (!opfn) {
2409 /* Bad size (including size == 3, which is a different insn group) */
2410 return false;
2411 }
2412
2413 if (a->q && ((a->vd | a->vn) & 1)) {
2414 return false;
2415 }
2416
2417 if (!vfp_access_check(s)) {
2418 return true;
2419 }
2420
2421 scalar = neon_get_scalar(a->size, a->vm);
2422
2423 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
2424 TCGv_i32 tmp = neon_load_reg(a->vn, pass);
2425 opfn(tmp, tmp, scalar);
2426 if (accfn) {
2427 TCGv_i32 rd = neon_load_reg(a->vd, pass);
2428 accfn(tmp, rd, tmp);
2429 tcg_temp_free_i32(rd);
2430 }
2431 neon_store_reg(a->vd, pass, tmp);
2432 }
2433 tcg_temp_free_i32(scalar);
2434 return true;
2435 }
2436
2437 static bool trans_VMUL_2sc(DisasContext *s, arg_2scalar *a)
2438 {
2439 static NeonGenTwoOpFn * const opfn[] = {
2440 NULL,
2441 gen_helper_neon_mul_u16,
2442 tcg_gen_mul_i32,
2443 NULL,
2444 };
2445
2446 return do_2scalar(s, a, opfn[a->size], NULL);
2447 }
2448
2449 static bool trans_VMLA_2sc(DisasContext *s, arg_2scalar *a)
2450 {
2451 static NeonGenTwoOpFn * const opfn[] = {
2452 NULL,
2453 gen_helper_neon_mul_u16,
2454 tcg_gen_mul_i32,
2455 NULL,
2456 };
2457 static NeonGenTwoOpFn * const accfn[] = {
2458 NULL,
2459 gen_helper_neon_add_u16,
2460 tcg_gen_add_i32,
2461 NULL,
2462 };
2463
2464 return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
2465 }
2466
2467 static bool trans_VMLS_2sc(DisasContext *s, arg_2scalar *a)
2468 {
2469 static NeonGenTwoOpFn * const opfn[] = {
2470 NULL,
2471 gen_helper_neon_mul_u16,
2472 tcg_gen_mul_i32,
2473 NULL,
2474 };
2475 static NeonGenTwoOpFn * const accfn[] = {
2476 NULL,
2477 gen_helper_neon_sub_u16,
2478 tcg_gen_sub_i32,
2479 NULL,
2480 };
2481
2482 return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
2483 }
2484
2485 /*
2486 * Rather than have a float-specific version of do_2scalar just for
2487 * three insns, we wrap a NeonGenTwoSingleOpFn to turn it into
2488 * a NeonGenTwoOpFn.
2489 */
2490 #define WRAP_FP_FN(WRAPNAME, FUNC) \
2491 static void WRAPNAME(TCGv_i32 rd, TCGv_i32 rn, TCGv_i32 rm) \
2492 { \
2493 TCGv_ptr fpstatus = get_fpstatus_ptr(1); \
2494 FUNC(rd, rn, rm, fpstatus); \
2495 tcg_temp_free_ptr(fpstatus); \
2496 }
2497
2498 WRAP_FP_FN(gen_VMUL_F_mul, gen_helper_vfp_muls)
2499 WRAP_FP_FN(gen_VMUL_F_add, gen_helper_vfp_adds)
2500 WRAP_FP_FN(gen_VMUL_F_sub, gen_helper_vfp_subs)
2501
2502 static bool trans_VMUL_F_2sc(DisasContext *s, arg_2scalar *a)
2503 {
2504 static NeonGenTwoOpFn * const opfn[] = {
2505 NULL,
2506 NULL, /* TODO: fp16 support */
2507 gen_VMUL_F_mul,
2508 NULL,
2509 };
2510
2511 return do_2scalar(s, a, opfn[a->size], NULL);
2512 }
2513
2514 static bool trans_VMLA_F_2sc(DisasContext *s, arg_2scalar *a)
2515 {
2516 static NeonGenTwoOpFn * const opfn[] = {
2517 NULL,
2518 NULL, /* TODO: fp16 support */
2519 gen_VMUL_F_mul,
2520 NULL,
2521 };
2522 static NeonGenTwoOpFn * const accfn[] = {
2523 NULL,
2524 NULL, /* TODO: fp16 support */
2525 gen_VMUL_F_add,
2526 NULL,
2527 };
2528
2529 return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
2530 }
2531
2532 static bool trans_VMLS_F_2sc(DisasContext *s, arg_2scalar *a)
2533 {
2534 static NeonGenTwoOpFn * const opfn[] = {
2535 NULL,
2536 NULL, /* TODO: fp16 support */
2537 gen_VMUL_F_mul,
2538 NULL,
2539 };
2540 static NeonGenTwoOpFn * const accfn[] = {
2541 NULL,
2542 NULL, /* TODO: fp16 support */
2543 gen_VMUL_F_sub,
2544 NULL,
2545 };
2546
2547 return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
2548 }
2549
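/*
 * The qdmulh/qrdmulh helpers take cpu_env so they can set QC;
 * WRAP_ENV_FN adapts them to the plain NeonGenTwoOpFn shape that
 * do_2scalar expects.
 */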
2550 WRAP_ENV_FN(gen_VQDMULH_16, gen_helper_neon_qdmulh_s16)
2551 WRAP_ENV_FN(gen_VQDMULH_32, gen_helper_neon_qdmulh_s32)
2552 WRAP_ENV_FN(gen_VQRDMULH_16, gen_helper_neon_qrdmulh_s16)
2553 WRAP_ENV_FN(gen_VQRDMULH_32, gen_helper_neon_qrdmulh_s32)
2554
2555 static bool trans_VQDMULH_2sc(DisasContext *s, arg_2scalar *a)
2556 {
2557 static NeonGenTwoOpFn * const opfn[] = {
2558 NULL,
2559 gen_VQDMULH_16,
2560 gen_VQDMULH_32,
2561 NULL,
2562 };
2563
2564 return do_2scalar(s, a, opfn[a->size], NULL);
2565 }
2566
2567 static bool trans_VQRDMULH_2sc(DisasContext *s, arg_2scalar *a)
2568 {
2569 static NeonGenTwoOpFn * const opfn[] = {
2570 NULL,
2571 gen_VQRDMULH_16,
2572 gen_VQRDMULH_32,
2573 NULL,
2574 };
2575
2576 return do_2scalar(s, a, opfn[a->size], NULL);
2577 }
2578
2579 static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
2580 NeonGenThreeOpEnvFn *opfn)
2581 {
2582 /*
2583 * VQRDMLAH/VQRDMLSH: this is like do_2scalar, but the opfn
2584 * performs a kind of fused op-then-accumulate using a helper
2585 * function that takes all of rd, rn and the scalar at once.
2586 */
2587 TCGv_i32 scalar;
2588 int pass;
2589
2590 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2591 return false;
2592 }
2593
2594 if (!dc_isar_feature(aa32_rdm, s)) {
2595 return false;
2596 }
2597
2598 /* UNDEF accesses to D16-D31 if they don't exist. */
2599 if (!dc_isar_feature(aa32_simd_r32, s) &&
2600 ((a->vd | a->vn | a->vm) & 0x10)) {
2601 return false;
2602 }
2603
2604 if (!opfn) {
2605 /* Bad size (including size == 3, which is a different insn group) */
2606 return false;
2607 }
2608
2609 if (a->q && ((a->vd | a->vn) & 1)) {
2610 return false;
2611 }
2612
2613 if (!vfp_access_check(s)) {
2614 return true;
2615 }
2616
2617 scalar = neon_get_scalar(a->size, a->vm);
2618
2619 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
2620 TCGv_i32 rn = neon_load_reg(a->vn, pass);
2621 TCGv_i32 rd = neon_load_reg(a->vd, pass);
2622 opfn(rd, cpu_env, rn, scalar, rd);
2623 tcg_temp_free_i32(rn);
2624 neon_store_reg(a->vd, pass, rd);
2625 }
2626 tcg_temp_free_i32(scalar);
2627
2628 return true;
2629 }
2630
2631 static bool trans_VQRDMLAH_2sc(DisasContext *s, arg_2scalar *a)
2632 {
2633 static NeonGenThreeOpEnvFn *opfn[] = {
2634 NULL,
2635 gen_helper_neon_qrdmlah_s16,
2636 gen_helper_neon_qrdmlah_s32,
2637 NULL,
2638 };
2639 return do_vqrdmlah_2sc(s, a, opfn[a->size]);
2640 }
2641
2642 static bool trans_VQRDMLSH_2sc(DisasContext *s, arg_2scalar *a)
2643 {
2644 static NeonGenThreeOpEnvFn *opfn[] = {
2645 NULL,
2646 gen_helper_neon_qrdmlsh_s16,
2647 gen_helper_neon_qrdmlsh_s32,
2648 NULL,
2649 };
2650 return do_vqrdmlah_2sc(s, a, opfn[a->size]);
2651 }
2652
2653 static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
2654 NeonGenTwoOpWidenFn *opfn,
2655 NeonGenTwo64OpFn *accfn)
2656 {
2657 /*
2658 * Two registers and a scalar, long operations: perform an
2659 * operation on the input elements and the scalar which produces
2660 * a double-width result, and then possibly perform an accumulation
2661 * operation of that result into the destination.
2662 */
2663 TCGv_i32 scalar, rn;
2664 TCGv_i64 rn0_64, rn1_64;
2665
2666 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2667 return false;
2668 }
2669
2670 /* UNDEF accesses to D16-D31 if they don't exist. */
2671 if (!dc_isar_feature(aa32_simd_r32, s) &&
2672 ((a->vd | a->vn | a->vm) & 0x10)) {
2673 return false;
2674 }
2675
2676 if (!opfn) {
2677 /* Bad size (including size == 3, which is a different insn group) */
2678 return false;
2679 }
2680
2681 if (a->vd & 1) {
2682 return false;
2683 }
2684
2685 if (!vfp_access_check(s)) {
2686 return true;
2687 }
2688
2689 scalar = neon_get_scalar(a->size, a->vm);
2690
2691 /* Load all inputs before writing any outputs, in case of overlap */
2692 rn = neon_load_reg(a->vn, 0);
2693 rn0_64 = tcg_temp_new_i64();
2694 opfn(rn0_64, rn, scalar);
2695 tcg_temp_free_i32(rn);
2696
2697 rn = neon_load_reg(a->vn, 1);
2698 rn1_64 = tcg_temp_new_i64();
2699 opfn(rn1_64, rn, scalar);
2700 tcg_temp_free_i32(rn);
2701 tcg_temp_free_i32(scalar);
2702
2703 if (accfn) {
2704 TCGv_i64 t64 = tcg_temp_new_i64();
2705 neon_load_reg64(t64, a->vd);
2706 accfn(t64, t64, rn0_64);
2707 neon_store_reg64(t64, a->vd);
2708 neon_load_reg64(t64, a->vd + 1);
2709 accfn(t64, t64, rn1_64);
2710 neon_store_reg64(t64, a->vd + 1);
2711 tcg_temp_free_i64(t64);
2712 } else {
2713 neon_store_reg64(rn0_64, a->vd);
2714 neon_store_reg64(rn1_64, a->vd + 1);
2715 }
2716 tcg_temp_free_i64(rn0_64);
2717 tcg_temp_free_i64(rn1_64);
2718 return true;
2719 }
2720
2721 static bool trans_VMULL_S_2sc(DisasContext *s, arg_2scalar *a)
2722 {
2723 static NeonGenTwoOpWidenFn * const opfn[] = {
2724 NULL,
2725 gen_helper_neon_mull_s16,
2726 gen_mull_s32,
2727 NULL,
2728 };
2729
2730 return do_2scalar_long(s, a, opfn[a->size], NULL);
2731 }
2732
2733 static bool trans_VMULL_U_2sc(DisasContext *s, arg_2scalar *a)
2734 {
2735 static NeonGenTwoOpWidenFn * const opfn[] = {
2736 NULL,
2737 gen_helper_neon_mull_u16,
2738 gen_mull_u32,
2739 NULL,
2740 };
2741
2742 return do_2scalar_long(s, a, opfn[a->size], NULL);
2743 }
2744
2745 #define DO_VMLAL_2SC(INSN, MULL, ACC) \
2746 static bool trans_##INSN##_2sc(DisasContext *s, arg_2scalar *a) \
2747 { \
2748 static NeonGenTwoOpWidenFn * const opfn[] = { \
2749 NULL, \
2750 gen_helper_neon_##MULL##16, \
2751 gen_##MULL##32, \
2752 NULL, \
2753 }; \
2754 static NeonGenTwo64OpFn * const accfn[] = { \
2755 NULL, \
2756 gen_helper_neon_##ACC##l_u32, \
2757 tcg_gen_##ACC##_i64, \
2758 NULL, \
2759 }; \
2760 return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]); \
2761 }
2762
2763 DO_VMLAL_2SC(VMLAL_S, mull_s, add)
2764 DO_VMLAL_2SC(VMLAL_U, mull_u, add)
2765 DO_VMLAL_2SC(VMLSL_S, mull_s, sub)
2766 DO_VMLAL_2SC(VMLSL_U, mull_u, sub)
2767
2768 static bool trans_VQDMULL_2sc(DisasContext *s, arg_2scalar *a)
2769 {
2770 static NeonGenTwoOpWidenFn * const opfn[] = {
2771 NULL,
2772 gen_VQDMULL_16,
2773 gen_VQDMULL_32,
2774 NULL,
2775 };
2776
2777 return do_2scalar_long(s, a, opfn[a->size], NULL);
2778 }
2779
2780 static bool trans_VQDMLAL_2sc(DisasContext *s, arg_2scalar *a)
2781 {
2782 static NeonGenTwoOpWidenFn * const opfn[] = {
2783 NULL,
2784 gen_VQDMULL_16,
2785 gen_VQDMULL_32,
2786 NULL,
2787 };
2788 static NeonGenTwo64OpFn * const accfn[] = {
2789 NULL,
2790 gen_VQDMLAL_acc_16,
2791 gen_VQDMLAL_acc_32,
2792 NULL,
2793 };
2794
2795 return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
2796 }
2797
2798 static bool trans_VQDMLSL_2sc(DisasContext *s, arg_2scalar *a)
2799 {
2800 static NeonGenTwoOpWidenFn * const opfn[] = {
2801 NULL,
2802 gen_VQDMULL_16,
2803 gen_VQDMULL_32,
2804 NULL,
2805 };
2806 static NeonGenTwo64OpFn * const accfn[] = {
2807 NULL,
2808 gen_VQDMLSL_acc_16,
2809 gen_VQDMLSL_acc_32,
2810 NULL,
2811 };
2812
2813 return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
2814 }
2815
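/*
 * VEXT: extract a contiguous vector of bytes from the concatenation
 * <Vm:Vn>, starting at byte offset imm (Vn supplies the low bytes);
 * implemented with extract2 on the 64-bit halves.
 */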
2816 static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
2817 {
2818 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2819 return false;
2820 }
2821
2822 /* UNDEF accesses to D16-D31 if they don't exist. */
2823 if (!dc_isar_feature(aa32_simd_r32, s) &&
2824 ((a->vd | a->vn | a->vm) & 0x10)) {
2825 return false;
2826 }
2827
2828 if ((a->vn | a->vm | a->vd) & a->q) {
2829 return false;
2830 }
2831
2832 if (a->imm > 7 && !a->q) {
2833 return false;
2834 }
2835
2836 if (!vfp_access_check(s)) {
2837 return true;
2838 }
2839
2840 if (!a->q) {
2841 /* Extract 64 bits from <Vm:Vn> */
2842 TCGv_i64 left, right, dest;
2843
2844 left = tcg_temp_new_i64();
2845 right = tcg_temp_new_i64();
2846 dest = tcg_temp_new_i64();
2847
2848 neon_load_reg64(right, a->vn);
2849 neon_load_reg64(left, a->vm);
2850 tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
2851 neon_store_reg64(dest, a->vd);
2852
2853 tcg_temp_free_i64(left);
2854 tcg_temp_free_i64(right);
2855 tcg_temp_free_i64(dest);
2856 } else {
2857 /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */
2858 TCGv_i64 left, middle, right, destleft, destright;
2859
2860 left = tcg_temp_new_i64();
2861 middle = tcg_temp_new_i64();
2862 right = tcg_temp_new_i64();
2863 destleft = tcg_temp_new_i64();
2864 destright = tcg_temp_new_i64();
2865
2866 if (a->imm < 8) {
2867 neon_load_reg64(right, a->vn);
2868 neon_load_reg64(middle, a->vn + 1);
2869 tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
2870 neon_load_reg64(left, a->vm);
2871 tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
2872 } else {
2873 neon_load_reg64(right, a->vn + 1);
2874 neon_load_reg64(middle, a->vm);
2875 tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
2876 neon_load_reg64(left, a->vm + 1);
2877 tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
2878 }
2879
2880 neon_store_reg64(destright, a->vd);
2881 neon_store_reg64(destleft, a->vd + 1);
2882
2883 tcg_temp_free_i64(destright);
2884 tcg_temp_free_i64(destleft);
2885 tcg_temp_free_i64(right);
2886 tcg_temp_free_i64(middle);
2887 tcg_temp_free_i64(left);
2888 }
2889 return true;
2890 }
2891
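/*
 * VTBL/VTBX: table lookup of bytes from a list of up to four D
 * registers starting at Vn. When a->op is set (VTBX) the current
 * destination bytes are passed to the helper so that out-of-range
 * indices leave them unchanged; for VTBL they are replaced with zero.
 */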
2892 static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
2893 {
2894 int n;
2895 TCGv_i32 tmp, tmp2, tmp3, tmp4;
2896 TCGv_ptr ptr1;
2897
2898 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2899 return false;
2900 }
2901
2902 /* UNDEF accesses to D16-D31 if they don't exist. */
2903 if (!dc_isar_feature(aa32_simd_r32, s) &&
2904 ((a->vd | a->vn | a->vm) & 0x10)) {
2905 return false;
2906 }
2907
2908 if (!vfp_access_check(s)) {
2909 return true;
2910 }
2911
2912 n = a->len + 1;
2913 if ((a->vn + n) > 32) {
2914 /*
2915 * This is UNPREDICTABLE; we choose to UNDEF to avoid the
2916 * helper function running off the end of the register file.
2917 */
2918 return false;
2919 }
2920 n <<= 3; /* table size in bytes: 8 per D register */
2921 if (a->op) {
2922 tmp = neon_load_reg(a->vd, 0);
2923 } else {
2924 tmp = tcg_temp_new_i32();
2925 tcg_gen_movi_i32(tmp, 0);
2926 }
2927 tmp2 = neon_load_reg(a->vm, 0);
2928 ptr1 = vfp_reg_ptr(true, a->vn);
2929 tmp4 = tcg_const_i32(n);
2930 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp4);
2931 tcg_temp_free_i32(tmp);
2932 if (a->op) {
2933 tmp = neon_load_reg(a->vd, 1);
2934 } else {
2935 tmp = tcg_temp_new_i32();
2936 tcg_gen_movi_i32(tmp, 0);
2937 }
2938 tmp3 = neon_load_reg(a->vm, 1);
2939 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp4);
2940 tcg_temp_free_i32(tmp4);
2941 tcg_temp_free_ptr(ptr1);
2942 neon_store_reg(a->vd, 0, tmp2);
2943 neon_store_reg(a->vd, 1, tmp3);
2944 tcg_temp_free_i32(tmp);
2945 return true;
2946 }
2947
2948 static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
2949 {
2950 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2951 return false;
2952 }
2953
2954 /* UNDEF accesses to D16-D31 if they don't exist. */
2955 if (!dc_isar_feature(aa32_simd_r32, s) &&
2956 ((a->vd | a->vm) & 0x10)) {
2957 return false;
2958 }
2959
2960 if (a->vd & a->q) {
2961 return false;
2962 }
2963
2964 if (!vfp_access_check(s)) {
2965 return true;
2966 }
2967
2968 tcg_gen_gvec_dup_mem(a->size, neon_reg_offset(a->vd, 0),
2969 neon_element_offset(a->vm, a->index, a->size),
2970 a->q ? 16 : 8, a->q ? 16 : 8);
2971 return true;
2972 }
2973
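/*
 * VREV64: reverse the elements within each 64-bit doubleword. Swap
 * the two 32-bit words of each doubleword and, for 8- and 16-bit
 * elements, also reverse the bytes or halfwords within each word.
 */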
2974 static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
2975 {
2976 int pass, half;
2977
2978 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2979 return false;
2980 }
2981
2982 /* UNDEF accesses to D16-D31 if they don't exist. */
2983 if (!dc_isar_feature(aa32_simd_r32, s) &&
2984 ((a->vd | a->vm) & 0x10)) {
2985 return false;
2986 }
2987
2988 if ((a->vd | a->vm) & a->q) {
2989 return false;
2990 }
2991
2992 if (a->size == 3) {
2993 return false;
2994 }
2995
2996 if (!vfp_access_check(s)) {
2997 return true;
2998 }
2999
3000 for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
3001 TCGv_i32 tmp[2];
3002
3003 for (half = 0; half < 2; half++) {
3004 tmp[half] = neon_load_reg(a->vm, pass * 2 + half);
3005 switch (a->size) {
3006 case 0:
3007 tcg_gen_bswap32_i32(tmp[half], tmp[half]);
3008 break;
3009 case 1:
3010 gen_swap_half(tmp[half], tmp[half]);
3011 break;
3012 case 2:
3013 break;
3014 default:
3015 g_assert_not_reached();
3016 }
3017 }
3018 neon_store_reg(a->vd, pass * 2, tmp[1]);
3019 neon_store_reg(a->vd, pass * 2 + 1, tmp[0]);
3020 }
3021 return true;
3022 }
3023
3024 static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
3025 NeonGenWidenFn *widenfn,
3026 NeonGenTwo64OpFn *opfn,
3027 NeonGenTwo64OpFn *accfn)
3028 {
3029 /*
3030 * Pairwise long operations: widen both halves of the pair,
3031 * combine the pairs with the opfn, and then possibly accumulate
3032 * into the destination with the accfn.
3033 */
3034 int pass;
3035
3036 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
3037 return false;
3038 }
3039
3040 /* UNDEF accesses to D16-D31 if they don't exist. */
3041 if (!dc_isar_feature(aa32_simd_r32, s) &&
3042 ((a->vd | a->vm) & 0x10)) {
3043 return false;
3044 }
3045
3046 if ((a->vd | a->vm) & a->q) {
3047 return false;
3048 }
3049
3050 if (!widenfn) {
3051 return false;
3052 }
3053
3054 if (!vfp_access_check(s)) {
3055 return true;
3056 }
3057
3058 for (pass = 0; pass < a->q + 1; pass++) {
3059 TCGv_i32 tmp;
3060 TCGv_i64 rm0_64, rm1_64, rd_64;
3061
3062 rm0_64 = tcg_temp_new_i64();
3063 rm1_64 = tcg_temp_new_i64();
3064 rd_64 = tcg_temp_new_i64();
3065 tmp = neon_load_reg(a->vm, pass * 2);
3066 widenfn(rm0_64, tmp);
3067 tcg_temp_free_i32(tmp);
3068 tmp = neon_load_reg(a->vm, pass * 2 + 1);
3069 widenfn(rm1_64, tmp);
3070 tcg_temp_free_i32(tmp);
3071 opfn(rd_64, rm0_64, rm1_64);
3072 tcg_temp_free_i64(rm0_64);
3073 tcg_temp_free_i64(rm1_64);
3074
3075 if (accfn) {
3076 TCGv_i64 tmp64 = tcg_temp_new_i64();
3077 neon_load_reg64(tmp64, a->vd + pass);
3078 accfn(rd_64, tmp64, rd_64);
3079 tcg_temp_free_i64(tmp64);
3080 }
3081 neon_store_reg64(rd_64, a->vd + pass);
3082 tcg_temp_free_i64(rd_64);
3083 }
3084 return true;
3085 }
3086
3087 static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a)
3088 {
3089 static NeonGenWidenFn * const widenfn[] = {
3090 gen_helper_neon_widen_s8,
3091 gen_helper_neon_widen_s16,
3092 tcg_gen_ext_i32_i64,
3093 NULL,
3094 };
3095 static NeonGenTwo64OpFn * const opfn[] = {
3096 gen_helper_neon_paddl_u16,
3097 gen_helper_neon_paddl_u32,
3098 tcg_gen_add_i64,
3099 NULL,
3100 };
3101
3102 return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
3103 }
3104
3105 static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a)
3106 {
3107 static NeonGenWidenFn * const widenfn[] = {
3108 gen_helper_neon_widen_u8,
3109 gen_helper_neon_widen_u16,
3110 tcg_gen_extu_i32_i64,
3111 NULL,
3112 };
3113 static NeonGenTwo64OpFn * const opfn[] = {
3114 gen_helper_neon_paddl_u16,
3115 gen_helper_neon_paddl_u32,
3116 tcg_gen_add_i64,
3117 NULL,
3118 };
3119
3120 return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
3121 }
3122
3123 static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a)
3124 {
3125 static NeonGenWidenFn * const widenfn[] = {
3126 gen_helper_neon_widen_s8,
3127 gen_helper_neon_widen_s16,
3128 tcg_gen_ext_i32_i64,
3129 NULL,
3130 };
3131 static NeonGenTwo64OpFn * const opfn[] = {
3132 gen_helper_neon_paddl_u16,
3133 gen_helper_neon_paddl_u32,
3134 tcg_gen_add_i64,
3135 NULL,
3136 };
3137 static NeonGenTwo64OpFn * const accfn[] = {
3138 gen_helper_neon_addl_u16,
3139 gen_helper_neon_addl_u32,
3140 tcg_gen_add_i64,
3141 NULL,
3142 };
3143
3144 return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
3145 accfn[a->size]);
3146 }
3147
3148 static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a)
3149 {
3150 static NeonGenWidenFn * const widenfn[] = {
3151 gen_helper_neon_widen_u8,
3152 gen_helper_neon_widen_u16,
3153 tcg_gen_extu_i32_i64,
3154 NULL,
3155 };
3156 static NeonGenTwo64OpFn * const opfn[] = {
3157 gen_helper_neon_paddl_u16,
3158 gen_helper_neon_paddl_u32,
3159 tcg_gen_add_i64,
3160 NULL,
3161 };
3162 static NeonGenTwo64OpFn * const accfn[] = {
3163 gen_helper_neon_addl_u16,
3164 gen_helper_neon_addl_u32,
3165 tcg_gen_add_i64,
3166 NULL,
3167 };
3168
3169 return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
3170 accfn[a->size]);
3171 }
3172
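/*
 * VZIP/VUZP (de)interleave the elements of Vd and Vm and write back
 * to both registers, so they are implemented with helpers that take
 * pointers into the register file rather than per-element TCG ops.
 */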
3173 typedef void ZipFn(TCGv_ptr, TCGv_ptr);
3174
3175 static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
3176 ZipFn *fn)
3177 {
3178 TCGv_ptr pd, pm;
3179
3180 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
3181 return false;
3182 }
3183
3184 /* UNDEF accesses to D16-D31 if they don't exist. */
3185 if (!dc_isar_feature(aa32_simd_r32, s) &&
3186 ((a->vd | a->vm) & 0x10)) {
3187 return false;
3188 }
3189
3190 if ((a->vd | a->vm) & a->q) {
3191 return false;
3192 }
3193
3194 if (!fn) {
3195 /* Bad size or size/q combination */
3196 return false;
3197 }
3198
3199 if (!vfp_access_check(s)) {
3200 return true;
3201 }
3202
3203 pd = vfp_reg_ptr(true, a->vd);
3204 pm = vfp_reg_ptr(true, a->vm);
3205 fn(pd, pm);
3206 tcg_temp_free_ptr(pd);
3207 tcg_temp_free_ptr(pm);
3208 return true;
3209 }
3210
3211 static bool trans_VUZP(DisasContext *s, arg_2misc *a)
3212 {
3213 static ZipFn * const fn[2][4] = {
3214 {
3215 gen_helper_neon_unzip8,
3216 gen_helper_neon_unzip16,
3217 NULL,
3218 NULL,
3219 }, {
3220 gen_helper_neon_qunzip8,
3221 gen_helper_neon_qunzip16,
3222 gen_helper_neon_qunzip32,
3223 NULL,
3224 }
3225 };
3226 return do_zip_uzp(s, a, fn[a->q][a->size]);
3227 }
3228
3229 static bool trans_VZIP(DisasContext *s, arg_2misc *a)
3230 {
3231 static ZipFn * const fn[2][4] = {
3232 {
3233 gen_helper_neon_zip8,
3234 gen_helper_neon_zip16,
3235 NULL,
3236 NULL,
3237 }, {
3238 gen_helper_neon_qzip8,
3239 gen_helper_neon_qzip16,
3240 gen_helper_neon_qzip32,
3241 NULL,
3242 }
3243 };
3244 return do_zip_uzp(s, a, fn[a->q][a->size]);
3245 }
3246
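/*
 * VMOVN and the saturating narrows: narrow a Q register of double-width
 * elements into a D register of single-width elements. The narrowfn
 * takes cpu_env so the saturating variants can set QC.
 */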
3247 static bool do_vmovn(DisasContext *s, arg_2misc *a,
3248 NeonGenNarrowEnvFn *narrowfn)
3249 {
3250 TCGv_i64 rm;
3251 TCGv_i32 rd0, rd1;
3252
3253 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
3254 return false;
3255 }
3256
3257 /* UNDEF accesses to D16-D31 if they don't exist. */
3258 if (!dc_isar_feature(aa32_simd_r32, s) &&
3259 ((a->vd | a->vm) & 0x10)) {
3260 return false;
3261 }
3262
3263 if (a->vm & 1) {
3264 return false;
3265 }
3266
3267 if (!narrowfn) {
3268 return false;
3269 }
3270
3271 if (!vfp_access_check(s)) {
3272 return true;
3273 }
3274
3275 rm = tcg_temp_new_i64();
3276 rd0 = tcg_temp_new_i32();
3277 rd1 = tcg_temp_new_i32();
3278
3279 neon_load_reg64(rm, a->vm);
3280 narrowfn(rd0, cpu_env, rm);
3281 neon_load_reg64(rm, a->vm + 1);
3282 narrowfn(rd1, cpu_env, rm);
3283 neon_store_reg(a->vd, 0, rd0);
3284 neon_store_reg(a->vd, 1, rd1);
3285 tcg_temp_free_i64(rm);
3286 return true;
3287 }
3288
3289 #define DO_VMOVN(INSN, FUNC) \
3290 static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
3291 { \
3292 static NeonGenNarrowEnvFn * const narrowfn[] = { \
3293 FUNC##8, \
3294 FUNC##16, \
3295 FUNC##32, \
3296 NULL, \
3297 }; \
3298 return do_vmovn(s, a, narrowfn[a->size]); \
3299 }
3300
3301 DO_VMOVN(VMOVN, gen_neon_narrow_u)
3302 DO_VMOVN(VQMOVUN, gen_helper_neon_unarrow_sat)
3303 DO_VMOVN(VQMOVN_S, gen_helper_neon_narrow_sat_s)
3304 DO_VMOVN(VQMOVN_U, gen_helper_neon_narrow_sat_u)
3305
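/*
 * VSHLL of the 2-reg-misc group: widen each element of Vm and then
 * shift it left by exactly the input element width (8 << size).
 */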
3306 static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
3307 {
3308 TCGv_i32 rm0, rm1;
3309 TCGv_i64 rd;
3310 static NeonGenWidenFn * const widenfns[] = {
3311 gen_helper_neon_widen_u8,
3312 gen_helper_neon_widen_u16,
3313 tcg_gen_extu_i32_i64,
3314 NULL,
3315 };
3316 NeonGenWidenFn *widenfn = widenfns[a->size];
3317
3318 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
3319 return false;
3320 }
3321
3322 /* UNDEF accesses to D16-D31 if they don't exist. */
3323 if (!dc_isar_feature(aa32_simd_r32, s) &&
3324 ((a->vd | a->vm) & 0x10)) {
3325 return false;
3326 }
3327
3328 if (a->vd & 1) {
3329 return false;
3330 }
3331
3332 if (!widenfn) {
3333 return false;
3334 }
3335
3336 if (!vfp_access_check(s)) {
3337 return true;
3338 }
3339
3340 rd = tcg_temp_new_i64();
3341
3342 rm0 = neon_load_reg(a->vm, 0);
3343 rm1 = neon_load_reg(a->vm, 1);
3344
3345 widenfn(rd, rm0);
3346 tcg_gen_shli_i64(rd, rd, 8 << a->size);
3347 neon_store_reg64(rd, a->vd);
3348 widenfn(rd, rm1);
3349 tcg_gen_shli_i64(rd, rd, 8 << a->size);
3350 neon_store_reg64(rd, a->vd + 1);
3351
3352 tcg_temp_free_i64(rd);
3353 tcg_temp_free_i32(rm0);
3354 tcg_temp_free_i32(rm1);
3355 return true;
3356 }
3357
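/*
 * VCVT between half and single precision on a whole vector: four f32
 * values in a Q register convert to four f16 values packed two per
 * word in a D register; the following function handles the widening
 * direction. Both use the VFP scalar conversion helpers.
 */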
3358 static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
3359 {
3360 TCGv_ptr fpst;
3361 TCGv_i32 ahp, tmp, tmp2, tmp3;
3362
3363 if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
3364 !dc_isar_feature(aa32_fp16_spconv, s)) {
3365 return false;
3366 }
3367
3368 /* UNDEF accesses to D16-D31 if they don't exist. */
3369 if (!dc_isar_feature(aa32_simd_r32, s) &&
3370 ((a->vd | a->vm) & 0x10)) {
3371 return false;
3372 }
3373
3374 if ((a->vm & 1) || (a->size != 1)) {
3375 return false;
3376 }
3377
3378 if (!vfp_access_check(s)) {
3379 return true;
3380 }
3381
3382 fpst = get_fpstatus_ptr(true);
3383 ahp = get_ahp_flag();
3384 tmp = neon_load_reg(a->vm, 0);
3385 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
3386 tmp2 = neon_load_reg(a->vm, 1);
3387 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
3388 tcg_gen_shli_i32(tmp2, tmp2, 16);
3389 tcg_gen_or_i32(tmp2, tmp2, tmp);
3390 tcg_temp_free_i32(tmp);
3391 tmp = neon_load_reg(a->vm, 2);
3392 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
3393 tmp3 = neon_load_reg(a->vm, 3);
3394 neon_store_reg(a->vd, 0, tmp2);
3395 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
3396 tcg_gen_shli_i32(tmp3, tmp3, 16);
3397 tcg_gen_or_i32(tmp3, tmp3, tmp);
3398 neon_store_reg(a->vd, 1, tmp3);
3399 tcg_temp_free_i32(tmp);
3400 tcg_temp_free_i32(ahp);
3401 tcg_temp_free_ptr(fpst);
3402
3403 return true;
3404 }
3405
3406 static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
3407 {
3408 TCGv_ptr fpst;
3409 TCGv_i32 ahp, tmp, tmp2, tmp3;
3410
3411 if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
3412 !dc_isar_feature(aa32_fp16_spconv, s)) {
3413 return false;
3414 }
3415
3416 /* UNDEF accesses to D16-D31 if they don't exist. */
3417 if (!dc_isar_feature(aa32_simd_r32, s) &&
3418 ((a->vd | a->vm) & 0x10)) {
3419 return false;
3420 }
3421
3422 if ((a->vd & 1) || (a->size != 1)) {
3423 return false;
3424 }
3425
3426 if (!vfp_access_check(s)) {
3427 return true;
3428 }
3429
3430 fpst = get_fpstatus_ptr(true);
3431 ahp = get_ahp_flag();
3432 tmp3 = tcg_temp_new_i32();
3433 tmp = neon_load_reg(a->vm, 0);
3434 tmp2 = neon_load_reg(a->vm, 1);
3435 tcg_gen_ext16u_i32(tmp3, tmp);
3436 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
3437 neon_store_reg(a->vd, 0, tmp3);
3438 tcg_gen_shri_i32(tmp, tmp, 16);
3439 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
3440 neon_store_reg(a->vd, 1, tmp);
3441 tmp3 = tcg_temp_new_i32();
3442 tcg_gen_ext16u_i32(tmp3, tmp2);
3443 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
3444 neon_store_reg(a->vd, 2, tmp3);
3445 tcg_gen_shri_i32(tmp2, tmp2, 16);
3446 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
3447 neon_store_reg(a->vd, 3, tmp2);
3448 tcg_temp_free_i32(ahp);
3449 tcg_temp_free_ptr(fpst);
3450
3451 return true;
3452 }
3453
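/*
 * 2-reg-misc operations that can be expressed entirely as a single
 * gvec expansion over the whole vector.
 */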
3454 static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
3455 {
3456 int vec_size = a->q ? 16 : 8;
3457 int rd_ofs = neon_reg_offset(a->vd, 0);
3458 int rm_ofs = neon_reg_offset(a->vm, 0);
3459
3460 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
3461 return false;
3462 }
3463
3464 /* UNDEF accesses to D16-D31 if they don't exist. */
3465 if (!dc_isar_feature(aa32_simd_r32, s) &&
3466 ((a->vd | a->vm) & 0x10)) {
3467 return false;
3468 }
3469
3470 if (a->size == 3) {
3471 return false;
3472 }
3473
3474 if ((a->vd | a->vm) & a->q) {
3475 return false;
3476 }
3477
3478 if (!vfp_access_check(s)) {
3479 return true;
3480 }
3481
3482 fn(a->size, rd_ofs, rm_ofs, vec_size, vec_size);
3483
3484 return true;
3485 }
3486
3487 #define DO_2MISC_VEC(INSN, FN) \
3488 static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
3489 { \
3490 return do_2misc_vec(s, a, FN); \
3491 }
3492
3493 DO_2MISC_VEC(VNEG, tcg_gen_gvec_neg)
3494 DO_2MISC_VEC(VABS, tcg_gen_gvec_abs)
3495 DO_2MISC_VEC(VCEQ0, gen_gvec_ceq0)
3496 DO_2MISC_VEC(VCGT0, gen_gvec_cgt0)
3497 DO_2MISC_VEC(VCLE0, gen_gvec_cle0)
3498 DO_2MISC_VEC(VCGE0, gen_gvec_cge0)
3499 DO_2MISC_VEC(VCLT0, gen_gvec_clt0)
3500
3501 static bool trans_VMVN(DisasContext *s, arg_2misc *a)
3502 {
3503 if (a->size != 0) {
3504 return false;
3505 }
3506 return do_2misc_vec(s, a, tcg_gen_gvec_not);
3507 }
3508
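/*
 * The crypto 2-reg-misc insns reuse the out-of-line gvec helpers.
 * The 3-operand wrapper passes rd as both destination and first
 * source, since AESE/AESD read and write Vd; DATA selects the
 * decrypt (AESD) or inverse (AESIMC) variant of a shared helper.
 */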
3509 #define WRAP_2M_3_OOL_FN(WRAPNAME, FUNC, DATA) \
3510 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
3511 uint32_t rm_ofs, uint32_t oprsz, \
3512 uint32_t maxsz) \
3513 { \
3514 tcg_gen_gvec_3_ool(rd_ofs, rd_ofs, rm_ofs, oprsz, maxsz, \
3515 DATA, FUNC); \
3516 }
3517
3518 #define WRAP_2M_2_OOL_FN(WRAPNAME, FUNC, DATA) \
3519 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
3520 uint32_t rm_ofs, uint32_t oprsz, \
3521 uint32_t maxsz) \
3522 { \
3523 tcg_gen_gvec_2_ool(rd_ofs, rm_ofs, oprsz, maxsz, DATA, FUNC); \
3524 }
3525
3526 WRAP_2M_3_OOL_FN(gen_AESE, gen_helper_crypto_aese, 0)
3527 WRAP_2M_3_OOL_FN(gen_AESD, gen_helper_crypto_aese, 1)
3528 WRAP_2M_2_OOL_FN(gen_AESMC, gen_helper_crypto_aesmc, 0)
3529 WRAP_2M_2_OOL_FN(gen_AESIMC, gen_helper_crypto_aesmc, 1)
3530 WRAP_2M_2_OOL_FN(gen_SHA1H, gen_helper_crypto_sha1h, 0)
3531 WRAP_2M_2_OOL_FN(gen_SHA1SU1, gen_helper_crypto_sha1su1, 0)
3532 WRAP_2M_2_OOL_FN(gen_SHA256SU0, gen_helper_crypto_sha256su0, 0)
3533
3534 #define DO_2M_CRYPTO(INSN, FEATURE, SIZE) \
3535 static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
3536 { \
3537 if (!dc_isar_feature(FEATURE, s) || a->size != SIZE) { \
3538 return false; \
3539 } \
3540 return do_2misc_vec(s, a, gen_##INSN); \
3541 }
3542
3543 DO_2M_CRYPTO(AESE, aa32_aes, 0)
3544 DO_2M_CRYPTO(AESD, aa32_aes, 0)
3545 DO_2M_CRYPTO(AESMC, aa32_aes, 0)
3546 DO_2M_CRYPTO(AESIMC, aa32_aes, 0)
3547 DO_2M_CRYPTO(SHA1H, aa32_sha1, 2)
3548 DO_2M_CRYPTO(SHA1SU1, aa32_sha1, 2)
3549 DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)