1/*
2 * ARM translation: AArch32 VFP instructions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 * Copyright (c) 2019 Linaro, Ltd.
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 */
22
23/*
24 * This file is intended to be included from translate.c; it uses
25 * some macros and definitions provided by that file.
26 * It might be possible to convert it to a standalone .c file eventually.
27 */
28
29/* Include the generated VFP decoder */
30#include "decode-vfp.inc.c"
31#include "decode-vfp-uncond.inc.c"
32
33/*
34 * The imm8 encodes the sign bit, enough bits to represent an exponent in
35 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
36 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
37 */
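/*
 * A quick worked example of the expansion below (illustrative only):
 * imm8 = 0x70 has sign 0, imm8<6> = 1 and imm8<5:0> = 0b110000, so the
 * MO_32 case computes 0x3e00 | (0x30 << 3) = 0x3f80 and shifts it to
 * 0x3f800000, i.e. 1.0f; the MO_64 case likewise gives 0x3ff0 << 48 =
 * 0x3ff0000000000000, i.e. 1.0 in double precision.
 */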
38uint64_t vfp_expand_imm(int size, uint8_t imm8)
39{
40 uint64_t imm;
41
42 switch (size) {
43 case MO_64:
44 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
45 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
46 extract32(imm8, 0, 6);
47 imm <<= 48;
48 break;
49 case MO_32:
50 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
51 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
52 (extract32(imm8, 0, 6) << 3);
53 imm <<= 16;
54 break;
55 case MO_16:
56 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
57 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
58 (extract32(imm8, 0, 6) << 6);
59 break;
60 default:
61 g_assert_not_reached();
62 }
63 return imm;
64}
65
66/*
67 * Return the offset of a 16-bit half of the specified VFP single-precision
68 * register. If top is true, returns the top 16 bits; otherwise the bottom
69 * 16 bits.
70 */
71static inline long vfp_f16_offset(unsigned reg, bool top)
72{
73 long offs = vfp_reg_offset(false, reg);
74#ifdef HOST_WORDS_BIGENDIAN
75 if (!top) {
76 offs += 2;
77 }
78#else
79 if (top) {
80 offs += 2;
81 }
82#endif
83 return offs;
84}
85
86/*
87 * Check that VFP access is enabled. If it is, do the necessary
88 * M-profile lazy-FP handling and then return true.
89 * If not, emit code to generate an appropriate exception and
90 * return false.
91 * The ignore_vfp_enabled argument specifies that we should ignore
92 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
93 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
94 */
95static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
96{
97 if (s->fp_excp_el) {
98 if (arm_dc_feature(s, ARM_FEATURE_M)) {
99 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
100 s->fp_excp_el);
101 } else {
102 gen_exception_insn(s, 4, EXCP_UDEF,
103 syn_fp_access_trap(1, 0xe, false),
104 s->fp_excp_el);
105 }
106 return false;
107 }
108
109 if (!s->vfp_enabled && !ignore_vfp_enabled) {
110 assert(!arm_dc_feature(s, ARM_FEATURE_M));
111 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
112 default_exception_el(s));
113 return false;
114 }
115
116 if (arm_dc_feature(s, ARM_FEATURE_M)) {
117 /* Handle M-profile lazy FP state mechanics */
118
119 /* Trigger lazy-state preservation if necessary */
120 if (s->v7m_lspact) {
121 /*
122 * Lazy state saving affects external memory and also the NVIC,
123 * so we must mark it as an IO operation for icount.
124 */
125 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
126 gen_io_start();
127 }
128 gen_helper_v7m_preserve_fp_state(cpu_env);
129 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
130 gen_io_end();
131 }
132 /*
133 * If the preserve_fp_state helper doesn't throw an exception
134 * then it will clear LSPACT; we don't need to repeat this for
135 * any further FP insns in this TB.
136 */
137 s->v7m_lspact = false;
138 }
139
140 /* Update ownership of FP context: set FPCCR.S to match current state */
141 if (s->v8m_fpccr_s_wrong) {
142 TCGv_i32 tmp;
143
144 tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
145 if (s->v8m_secure) {
146 tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
147 } else {
148 tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
149 }
150 store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
151 /* Don't need to do this for any further FP insns in this TB */
152 s->v8m_fpccr_s_wrong = false;
153 }
154
155 if (s->v7m_new_fp_ctxt_needed) {
156 /*
157 * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
158 * and the FPSCR.
159 */
160 TCGv_i32 control, fpscr;
161 uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
162
163 fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
164 gen_helper_vfp_set_fpscr(cpu_env, fpscr);
165 tcg_temp_free_i32(fpscr);
166 /*
167 * We don't need to arrange to end the TB, because the only
168 * parts of FPSCR which we cache in the TB flags are the VECLEN
169 * and VECSTRIDE, and those don't exist for M-profile.
170 */
171
172 if (s->v8m_secure) {
173 bits |= R_V7M_CONTROL_SFPA_MASK;
174 }
175 control = load_cpu_field(v7m.control[M_REG_S]);
176 tcg_gen_ori_i32(control, control, bits);
177 store_cpu_field(control, v7m.control[M_REG_S]);
178 /* Don't need to do this for any further FP insns in this TB */
179 s->v7m_new_fp_ctxt_needed = false;
180 }
181 }
182
183 return true;
184}
185
186/*
187 * The most usual kind of VFP access check, for everything except
188 * FMXR/FMRX to the always-available special registers.
189 */
190static bool vfp_access_check(DisasContext *s)
191{
192 return full_vfp_access_check(s, false);
193}
194
195static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
196{
197 uint32_t rd, rn, rm;
198 bool dp = a->dp;
199
200 if (!dc_isar_feature(aa32_vsel, s)) {
201 return false;
202 }
203
204 /* UNDEF accesses to D16-D31 if they don't exist */
205 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
206 ((a->vm | a->vn | a->vd) & 0x10)) {
207 return false;
208 }
209
210 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
211 return false;
212 }
213
214 rd = a->vd;
215 rn = a->vn;
216 rm = a->vm;
217
218 if (!vfp_access_check(s)) {
219 return true;
220 }
221
222 if (dp) {
223 TCGv_i64 frn, frm, dest;
224 TCGv_i64 tmp, zero, zf, nf, vf;
225
226 zero = tcg_const_i64(0);
227
228 frn = tcg_temp_new_i64();
229 frm = tcg_temp_new_i64();
230 dest = tcg_temp_new_i64();
231
232 zf = tcg_temp_new_i64();
233 nf = tcg_temp_new_i64();
234 vf = tcg_temp_new_i64();
235
236 tcg_gen_extu_i32_i64(zf, cpu_ZF);
237 tcg_gen_ext_i32_i64(nf, cpu_NF);
238 tcg_gen_ext_i32_i64(vf, cpu_VF);
239
240 neon_load_reg64(frn, rn);
241 neon_load_reg64(frm, rm);
242 switch (a->cc) {
243 case 0: /* eq: Z */
244 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
245 frn, frm);
246 break;
247 case 1: /* vs: V */
248 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
249 frn, frm);
250 break;
251 case 2: /* ge: N == V -> N ^ V == 0 */
252 tmp = tcg_temp_new_i64();
253 tcg_gen_xor_i64(tmp, vf, nf);
254 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
255 frn, frm);
256 tcg_temp_free_i64(tmp);
257 break;
258 case 3: /* gt: !Z && N == V */
259 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
260 frn, frm);
261 tmp = tcg_temp_new_i64();
262 tcg_gen_xor_i64(tmp, vf, nf);
263 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
264 dest, frm);
265 tcg_temp_free_i64(tmp);
266 break;
267 }
268 neon_store_reg64(dest, rd);
269 tcg_temp_free_i64(frn);
270 tcg_temp_free_i64(frm);
271 tcg_temp_free_i64(dest);
272
273 tcg_temp_free_i64(zf);
274 tcg_temp_free_i64(nf);
275 tcg_temp_free_i64(vf);
276
277 tcg_temp_free_i64(zero);
278 } else {
279 TCGv_i32 frn, frm, dest;
280 TCGv_i32 tmp, zero;
281
282 zero = tcg_const_i32(0);
283
284 frn = tcg_temp_new_i32();
285 frm = tcg_temp_new_i32();
286 dest = tcg_temp_new_i32();
287 neon_load_reg32(frn, rn);
288 neon_load_reg32(frm, rm);
289 switch (a->cc) {
290 case 0: /* eq: Z */
291 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
292 frn, frm);
293 break;
294 case 1: /* vs: V */
295 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
296 frn, frm);
297 break;
298 case 2: /* ge: N == V -> N ^ V == 0 */
299 tmp = tcg_temp_new_i32();
300 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
301 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
302 frn, frm);
303 tcg_temp_free_i32(tmp);
304 break;
305 case 3: /* gt: !Z && N == V */
306 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
307 frn, frm);
308 tmp = tcg_temp_new_i32();
309 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
310 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
311 dest, frm);
312 tcg_temp_free_i32(tmp);
313 break;
314 }
315 neon_store_reg32(dest, rd);
316 tcg_temp_free_i32(frn);
317 tcg_temp_free_i32(frm);
318 tcg_temp_free_i32(dest);
319
320 tcg_temp_free_i32(zero);
321 }
322
323 return true;
324}
325
326static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
327{
328 uint32_t rd, rn, rm;
329 bool dp = a->dp;
330 bool vmin = a->op;
331 TCGv_ptr fpst;
332
333 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
334 return false;
335 }
336
337 /* UNDEF accesses to D16-D31 if they don't exist */
338 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
339 ((a->vm | a->vn | a->vd) & 0x10)) {
340 return false;
341 }
342
343 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
344 return false;
345 }
346
347 rd = a->vd;
348 rn = a->vn;
349 rm = a->vm;
350
351 if (!vfp_access_check(s)) {
352 return true;
353 }
354
355 fpst = get_fpstatus_ptr(0);
356
357 if (dp) {
358 TCGv_i64 frn, frm, dest;
359
360 frn = tcg_temp_new_i64();
361 frm = tcg_temp_new_i64();
362 dest = tcg_temp_new_i64();
363
364 neon_load_reg64(frn, rn);
365 neon_load_reg64(frm, rm);
366 if (vmin) {
367 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
368 } else {
369 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
370 }
371 neon_store_reg64(dest, rd);
372 tcg_temp_free_i64(frn);
373 tcg_temp_free_i64(frm);
374 tcg_temp_free_i64(dest);
375 } else {
376 TCGv_i32 frn, frm, dest;
377
378 frn = tcg_temp_new_i32();
379 frm = tcg_temp_new_i32();
380 dest = tcg_temp_new_i32();
381
382 neon_load_reg32(frn, rn);
383 neon_load_reg32(frm, rm);
384 if (vmin) {
385 gen_helper_vfp_minnums(dest, frn, frm, fpst);
386 } else {
387 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
388 }
389 neon_store_reg32(dest, rd);
390 tcg_temp_free_i32(frn);
391 tcg_temp_free_i32(frm);
392 tcg_temp_free_i32(dest);
393 }
394
395 tcg_temp_free_ptr(fpst);
396 return true;
397}
398
399/*
400 * Table for converting the most common AArch32 encoding of
401 * rounding mode to arm_fprounding order (which matches the
402 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
403 */
404static const uint8_t fp_decode_rm[] = {
405 FPROUNDING_TIEAWAY,
406 FPROUNDING_TIEEVEN,
407 FPROUNDING_POSINF,
408 FPROUNDING_NEGINF,
409};
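/*
 * Informally: table index 0b00 corresponds to the 'A' (ties-away) forms
 * such as VRINTA/VCVTA, 0b01 to 'N' (ties-to-even), 0b10 to 'P' (towards
 * +infinity) and 0b11 to 'M' (towards -infinity).
 */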
410
411static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
412{
413 uint32_t rd, rm;
414 bool dp = a->dp;
415 TCGv_ptr fpst;
416 TCGv_i32 tcg_rmode;
417 int rounding = fp_decode_rm[a->rm];
418
419 if (!dc_isar_feature(aa32_vrint, s)) {
420 return false;
421 }
422
423 /* UNDEF accesses to D16-D31 if they don't exist */
424 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
425 ((a->vm | a->vd) & 0x10)) {
426 return false;
427 }
428
429 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
430 return false;
431 }
432
433 rd = a->vd;
434 rm = a->vm;
435
436 if (!vfp_access_check(s)) {
437 return true;
438 }
439
440 fpst = get_fpstatus_ptr(0);
441
442 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
443 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
444
445 if (dp) {
446 TCGv_i64 tcg_op;
447 TCGv_i64 tcg_res;
448 tcg_op = tcg_temp_new_i64();
449 tcg_res = tcg_temp_new_i64();
450 neon_load_reg64(tcg_op, rm);
451 gen_helper_rintd(tcg_res, tcg_op, fpst);
452 neon_store_reg64(tcg_res, rd);
453 tcg_temp_free_i64(tcg_op);
454 tcg_temp_free_i64(tcg_res);
455 } else {
456 TCGv_i32 tcg_op;
457 TCGv_i32 tcg_res;
458 tcg_op = tcg_temp_new_i32();
459 tcg_res = tcg_temp_new_i32();
460 neon_load_reg32(tcg_op, rm);
461 gen_helper_rints(tcg_res, tcg_op, fpst);
462 neon_store_reg32(tcg_res, rd);
463 tcg_temp_free_i32(tcg_op);
464 tcg_temp_free_i32(tcg_res);
465 }
466
467 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
468 tcg_temp_free_i32(tcg_rmode);
469
470 tcg_temp_free_ptr(fpst);
471 return true;
472}
473
474static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
475{
476 uint32_t rd, rm;
477 bool dp = a->dp;
478 TCGv_ptr fpst;
479 TCGv_i32 tcg_rmode, tcg_shift;
480 int rounding = fp_decode_rm[a->rm];
481 bool is_signed = a->op;
482
483 if (!dc_isar_feature(aa32_vcvt_dr, s)) {
484 return false;
485 }
486
487 /* UNDEF accesses to D16-D31 if they don't exist */
488 if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
489 return false;
490 }
491
492 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
493 return false;
494 }
495
496 rd = a->vd;
497 rm = a->vm;
498
499 if (!vfp_access_check(s)) {
500 return true;
501 }
502
503 fpst = get_fpstatus_ptr(0);
504
505 tcg_shift = tcg_const_i32(0);
506
507 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
508 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
509
510 if (dp) {
511 TCGv_i64 tcg_double, tcg_res;
512 TCGv_i32 tcg_tmp;
513 tcg_double = tcg_temp_new_i64();
514 tcg_res = tcg_temp_new_i64();
515 tcg_tmp = tcg_temp_new_i32();
516 neon_load_reg64(tcg_double, rm);
517 if (is_signed) {
518 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
519 } else {
520 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
521 }
522 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
523 neon_store_reg32(tcg_tmp, rd);
524 tcg_temp_free_i32(tcg_tmp);
525 tcg_temp_free_i64(tcg_res);
526 tcg_temp_free_i64(tcg_double);
527 } else {
528 TCGv_i32 tcg_single, tcg_res;
529 tcg_single = tcg_temp_new_i32();
530 tcg_res = tcg_temp_new_i32();
531 neon_load_reg32(tcg_single, rm);
532 if (is_signed) {
533 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
534 } else {
535 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
536 }
537 neon_store_reg32(tcg_res, rd);
538 tcg_temp_free_i32(tcg_res);
539 tcg_temp_free_i32(tcg_single);
540 }
541
542 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
543 tcg_temp_free_i32(tcg_rmode);
544
545 tcg_temp_free_i32(tcg_shift);
546
547 tcg_temp_free_ptr(fpst);
548
549 return true;
550}
551
552static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
553{
554 /* VMOV scalar to general purpose register */
555 TCGv_i32 tmp;
556 int pass;
557 uint32_t offset;
558
559 /* UNDEF accesses to D16-D31 if they don't exist */
560 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
561 return false;
562 }
563
564 offset = a->index << a->size;
565 pass = extract32(offset, 2, 1);
566 offset = extract32(offset, 0, 2) * 8;
567
568 if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
569 return false;
570 }
571
572 if (!vfp_access_check(s)) {
573 return true;
574 }
575
576 tmp = neon_load_reg(a->vn, pass);
577 switch (a->size) {
578 case 0:
579 if (offset) {
580 tcg_gen_shri_i32(tmp, tmp, offset);
581 }
582 if (a->u) {
583 gen_uxtb(tmp);
584 } else {
585 gen_sxtb(tmp);
586 }
587 break;
588 case 1:
589 if (a->u) {
590 if (offset) {
591 tcg_gen_shri_i32(tmp, tmp, 16);
592 } else {
593 gen_uxth(tmp);
594 }
595 } else {
596 if (offset) {
597 tcg_gen_sari_i32(tmp, tmp, 16);
598 } else {
599 gen_sxth(tmp);
600 }
601 }
602 break;
603 case 2:
604 break;
605 }
606 store_reg(s, a->rt, tmp);
607
608 return true;
609}
610
611static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
612{
613 /* VMOV general purpose register to scalar */
614 TCGv_i32 tmp, tmp2;
615 int pass;
616 uint32_t offset;
617
618 /* UNDEF accesses to D16-D31 if they don't exist */
619 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
620 return false;
621 }
622
623 offset = a->index << a->size;
624 pass = extract32(offset, 2, 1);
625 offset = extract32(offset, 0, 2) * 8;
626
627 if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
628 return false;
629 }
630
631 if (!vfp_access_check(s)) {
632 return true;
633 }
634
635 tmp = load_reg(s, a->rt);
636 switch (a->size) {
637 case 0:
638 tmp2 = neon_load_reg(a->vn, pass);
639 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
640 tcg_temp_free_i32(tmp2);
641 break;
642 case 1:
643 tmp2 = neon_load_reg(a->vn, pass);
644 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
645 tcg_temp_free_i32(tmp2);
646 break;
647 case 2:
648 break;
649 }
650 neon_store_reg(a->vn, pass, tmp);
651
652 return true;
653}
654
655static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
656{
657 /* VDUP (general purpose register) */
658 TCGv_i32 tmp;
659 int size, vec_size;
660
661 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
662 return false;
663 }
664
665 /* UNDEF accesses to D16-D31 if they don't exist */
666 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
667 return false;
668 }
669
670 if (a->b && a->e) {
671 return false;
672 }
673
674 if (a->q && (a->vn & 1)) {
675 return false;
676 }
677
678 vec_size = a->q ? 16 : 8;
679 if (a->b) {
680 size = 0;
681 } else if (a->e) {
682 size = 1;
683 } else {
684 size = 2;
685 }
686
687 if (!vfp_access_check(s)) {
688 return true;
689 }
690
691 tmp = load_reg(s, a->rt);
692 tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
693 vec_size, vec_size, tmp);
694 tcg_temp_free_i32(tmp);
695
696 return true;
697}
698
699static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
700{
701 TCGv_i32 tmp;
702 bool ignore_vfp_enabled = false;
703
704 if (arm_dc_feature(s, ARM_FEATURE_M)) {
705 /*
706 * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
707 * Writes to R15 are UNPREDICTABLE; we choose to undef.
708 */
709 if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
710 return false;
711 }
712 }
713
714 switch (a->reg) {
715 case ARM_VFP_FPSID:
716 /*
717 * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
718 * all ID registers to privileged access only.
719 */
720 if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
721 return false;
722 }
723 ignore_vfp_enabled = true;
724 break;
725 case ARM_VFP_MVFR0:
726 case ARM_VFP_MVFR1:
727 if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
728 return false;
729 }
730 ignore_vfp_enabled = true;
731 break;
732 case ARM_VFP_MVFR2:
733 if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
734 return false;
735 }
736 ignore_vfp_enabled = true;
737 break;
738 case ARM_VFP_FPSCR:
739 break;
740 case ARM_VFP_FPEXC:
741 if (IS_USER(s)) {
742 return false;
743 }
744 ignore_vfp_enabled = true;
745 break;
746 case ARM_VFP_FPINST:
747 case ARM_VFP_FPINST2:
748 /* Not present in VFPv3 */
749 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
750 return false;
751 }
752 break;
753 default:
754 return false;
755 }
756
757 if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
758 return true;
759 }
760
761 if (a->l) {
762 /* VMRS, move VFP special register to gp register */
763 switch (a->reg) {
764 case ARM_VFP_FPSID:
765 case ARM_VFP_FPEXC:
766 case ARM_VFP_FPINST:
767 case ARM_VFP_FPINST2:
768 case ARM_VFP_MVFR0:
769 case ARM_VFP_MVFR1:
770 case ARM_VFP_MVFR2:
771 tmp = load_cpu_field(vfp.xregs[a->reg]);
772 break;
773 case ARM_VFP_FPSCR:
774 if (a->rt == 15) {
775 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
776 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
777 } else {
778 tmp = tcg_temp_new_i32();
779 gen_helper_vfp_get_fpscr(tmp, cpu_env);
780 }
781 break;
782 default:
783 g_assert_not_reached();
784 }
785
786 if (a->rt == 15) {
787 /* Set the 4 flag bits in the CPSR. */
788 gen_set_nzcv(tmp);
789 tcg_temp_free_i32(tmp);
790 } else {
791 store_reg(s, a->rt, tmp);
792 }
793 } else {
794 /* VMSR, move gp register to VFP special register */
795 switch (a->reg) {
796 case ARM_VFP_FPSID:
797 case ARM_VFP_MVFR0:
798 case ARM_VFP_MVFR1:
799 case ARM_VFP_MVFR2:
800 /* Writes are ignored. */
801 break;
802 case ARM_VFP_FPSCR:
803 tmp = load_reg(s, a->rt);
804 gen_helper_vfp_set_fpscr(cpu_env, tmp);
805 tcg_temp_free_i32(tmp);
806 gen_lookup_tb(s);
807 break;
808 case ARM_VFP_FPEXC:
809 /*
810 * TODO: VFP subarchitecture support.
811 * For now, keep the EN bit only
812 */
813 tmp = load_reg(s, a->rt);
814 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
815 store_cpu_field(tmp, vfp.xregs[a->reg]);
816 gen_lookup_tb(s);
817 break;
818 case ARM_VFP_FPINST:
819 case ARM_VFP_FPINST2:
820 tmp = load_reg(s, a->rt);
821 store_cpu_field(tmp, vfp.xregs[a->reg]);
822 break;
823 default:
824 g_assert_not_reached();
825 }
826 }
827
828 return true;
829}
830
831static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
832{
833 TCGv_i32 tmp;
834
835 if (!vfp_access_check(s)) {
836 return true;
837 }
838
839 if (a->l) {
840 /* VFP to general purpose register */
841 tmp = tcg_temp_new_i32();
842 neon_load_reg32(tmp, a->vn);
843 if (a->rt == 15) {
844 /* Set the 4 flag bits in the CPSR. */
845 gen_set_nzcv(tmp);
846 tcg_temp_free_i32(tmp);
847 } else {
848 store_reg(s, a->rt, tmp);
849 }
850 } else {
851 /* general purpose register to VFP */
852 tmp = load_reg(s, a->rt);
853 neon_store_reg32(tmp, a->vn);
854 tcg_temp_free_i32(tmp);
855 }
856
857 return true;
858}
859
860static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
861{
862 TCGv_i32 tmp;
863
864 /*
865 * VMOV between two general-purpose registers and two single precision
866 * floating point registers
867 */
868 if (!vfp_access_check(s)) {
869 return true;
870 }
871
872 if (a->op) {
873 /* fpreg to gpreg */
874 tmp = tcg_temp_new_i32();
875 neon_load_reg32(tmp, a->vm);
876 store_reg(s, a->rt, tmp);
877 tmp = tcg_temp_new_i32();
878 neon_load_reg32(tmp, a->vm + 1);
879 store_reg(s, a->rt2, tmp);
880 } else {
881 /* gpreg to fpreg */
882 tmp = load_reg(s, a->rt);
883 neon_store_reg32(tmp, a->vm);
884 tmp = load_reg(s, a->rt2);
885 neon_store_reg32(tmp, a->vm + 1);
886 }
887
888 return true;
889}
890
891static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
892{
893 TCGv_i32 tmp;
894
895 /*
896 * VMOV between two general-purpose registers and one double precision
897 * floating point register
898 */
899
900 /* UNDEF accesses to D16-D31 if they don't exist */
901 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
902 return false;
903 }
904
905 if (!vfp_access_check(s)) {
906 return true;
907 }
908
909 if (a->op) {
910 /* fpreg to gpreg */
911 tmp = tcg_temp_new_i32();
912 neon_load_reg32(tmp, a->vm * 2);
913 store_reg(s, a->rt, tmp);
914 tmp = tcg_temp_new_i32();
915 neon_load_reg32(tmp, a->vm * 2 + 1);
916 store_reg(s, a->rt2, tmp);
917 } else {
918 /* gpreg to fpreg */
919 tmp = load_reg(s, a->rt);
920 neon_store_reg32(tmp, a->vm * 2);
921 tcg_temp_free_i32(tmp);
922 tmp = load_reg(s, a->rt2);
923 neon_store_reg32(tmp, a->vm * 2 + 1);
924 tcg_temp_free_i32(tmp);
925 }
926
927 return true;
928}
929
930static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
931{
932 uint32_t offset;
933 TCGv_i32 addr, tmp;
934
935 if (!vfp_access_check(s)) {
936 return true;
937 }
938
939 offset = a->imm << 2;
940 if (!a->u) {
941 offset = -offset;
942 }
943
944 if (s->thumb && a->rn == 15) {
945 /* This is actually UNPREDICTABLE */
946 addr = tcg_temp_new_i32();
947 tcg_gen_movi_i32(addr, s->pc & ~2);
948 } else {
949 addr = load_reg(s, a->rn);
950 }
951 tcg_gen_addi_i32(addr, addr, offset);
952 tmp = tcg_temp_new_i32();
953 if (a->l) {
954 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
955 neon_store_reg32(tmp, a->vd);
956 } else {
957 neon_load_reg32(tmp, a->vd);
958 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
959 }
960 tcg_temp_free_i32(tmp);
961 tcg_temp_free_i32(addr);
962
963 return true;
964}
965
966static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
967{
968 uint32_t offset;
969 TCGv_i32 addr;
970 TCGv_i64 tmp;
971
972 /* UNDEF accesses to D16-D31 if they don't exist */
973 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
974 return false;
975 }
976
977 if (!vfp_access_check(s)) {
978 return true;
979 }
980
981 offset = a->imm << 2;
982 if (!a->u) {
983 offset = -offset;
984 }
985
986 if (s->thumb && a->rn == 15) {
987 /* This is actually UNPREDICTABLE */
988 addr = tcg_temp_new_i32();
989 tcg_gen_movi_i32(addr, s->pc & ~2);
990 } else {
991 addr = load_reg(s, a->rn);
992 }
993 tcg_gen_addi_i32(addr, addr, offset);
994 tmp = tcg_temp_new_i64();
995 if (a->l) {
996 gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
997 neon_store_reg64(tmp, a->vd);
998 } else {
999 neon_load_reg64(tmp, a->vd);
1000 gen_aa32_st64(s, tmp, addr, get_mem_index(s));
1001 }
1002 tcg_temp_free_i64(tmp);
1003 tcg_temp_free_i32(addr);
1004
1005 return true;
1006}
1007
1008static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
1009{
1010 uint32_t offset;
1011 TCGv_i32 addr, tmp;
1012 int i, n;
1013
1014 n = a->imm;
1015
1016 if (n == 0 || (a->vd + n) > 32) {
1017 /*
1018 * UNPREDICTABLE cases for bad immediates: we choose to
1019 * UNDEF to avoid generating huge numbers of TCG ops
1020 */
1021 return false;
1022 }
1023 if (a->rn == 15 && a->w) {
1024 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
1025 return false;
1026 }
1027
1028 if (!vfp_access_check(s)) {
1029 return true;
1030 }
1031
1032 if (s->thumb && a->rn == 15) {
1033 /* This is actually UNPREDICTABLE */
1034 addr = tcg_temp_new_i32();
1035 tcg_gen_movi_i32(addr, s->pc & ~2);
1036 } else {
1037 addr = load_reg(s, a->rn);
1038 }
1039 if (a->p) {
1040 /* pre-decrement */
1041 tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
1042 }
1043
1044 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1045 /*
1046 * Here 'addr' is the lowest address we will store to,
1047 * and is either the old SP (if post-increment) or
1048 * the new SP (if pre-decrement). For post-increment
1049 * where the old value is below the limit and the new
1050 * value is above, it is UNKNOWN whether the limit check
1051 * triggers; we choose to trigger.
1052 */
1053 gen_helper_v8m_stackcheck(cpu_env, addr);
1054 }
1055
1056 offset = 4;
1057 tmp = tcg_temp_new_i32();
1058 for (i = 0; i < n; i++) {
1059 if (a->l) {
1060 /* load */
1061 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1062 neon_store_reg32(tmp, a->vd + i);
1063 } else {
1064 /* store */
1065 neon_load_reg32(tmp, a->vd + i);
1066 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1067 }
1068 tcg_gen_addi_i32(addr, addr, offset);
1069 }
1070 tcg_temp_free_i32(tmp);
1071 if (a->w) {
1072 /* writeback */
1073 if (a->p) {
1074 offset = -offset * n;
1075 tcg_gen_addi_i32(addr, addr, offset);
1076 }
1077 store_reg(s, a->rn, addr);
1078 } else {
1079 tcg_temp_free_i32(addr);
1080 }
1081
1082 return true;
1083}
1084
1085static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
1086{
1087 uint32_t offset;
1088 TCGv_i32 addr;
1089 TCGv_i64 tmp;
1090 int i, n;
1091
1092 n = a->imm >> 1;
1093
1094 if (n == 0 || (a->vd + n) > 32 || n > 16) {
1095 /*
1096 * UNPREDICTABLE cases for bad immediates: we choose to
1097 * UNDEF to avoid generating huge numbers of TCG ops
1098 */
1099 return false;
1100 }
1101 if (a->rn == 15 && a->w) {
1102 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
1103 return false;
1104 }
1105
1106 /* UNDEF accesses to D16-D31 if they don't exist */
1107 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
1108 return false;
1109 }
1110
1111 if (!vfp_access_check(s)) {
1112 return true;
1113 }
1114
1115 if (s->thumb && a->rn == 15) {
1116 /* This is actually UNPREDICTABLE */
1117 addr = tcg_temp_new_i32();
1118 tcg_gen_movi_i32(addr, s->pc & ~2);
1119 } else {
1120 addr = load_reg(s, a->rn);
1121 }
1122 if (a->p) {
1123 /* pre-decrement */
1124 tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
1125 }
1126
1127 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1128 /*
1129 * Here 'addr' is the lowest address we will store to,
1130 * and is either the old SP (if post-increment) or
1131 * the new SP (if pre-decrement). For post-increment
1132 * where the old value is below the limit and the new
1133 * value is above, it is UNKNOWN whether the limit check
1134 * triggers; we choose to trigger.
1135 */
1136 gen_helper_v8m_stackcheck(cpu_env, addr);
1137 }
1138
1139 offset = 8;
1140 tmp = tcg_temp_new_i64();
1141 for (i = 0; i < n; i++) {
1142 if (a->l) {
1143 /* load */
1144 gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
1145 neon_store_reg64(tmp, a->vd + i);
1146 } else {
1147 /* store */
1148 neon_load_reg64(tmp, a->vd + i);
1149 gen_aa32_st64(s, tmp, addr, get_mem_index(s));
1150 }
1151 tcg_gen_addi_i32(addr, addr, offset);
1152 }
1153 tcg_temp_free_i64(tmp);
1154 if (a->w) {
1155 /* writeback */
1156 if (a->p) {
1157 offset = -offset * n;
1158 } else if (a->imm & 1) {
1159 offset = 4;
1160 } else {
1161 offset = 0;
1162 }
1163
1164 if (offset != 0) {
1165 tcg_gen_addi_i32(addr, addr, offset);
1166 }
1167 store_reg(s, a->rn, addr);
1168 } else {
1169 tcg_temp_free_i32(addr);
1170 }
1171
1172 return true;
1173}
1174
1175/*
1176 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
1177 * The callback should emit code to write a value to vd. If
1178 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
1179 * will contain the old value of the relevant VFP register;
1180 * otherwise it must be written to only.
1181 */
1182typedef void VFPGen3OpSPFn(TCGv_i32 vd,
1183 TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
1184typedef void VFPGen3OpDPFn(TCGv_i64 vd,
1185 TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
1186
1187/*
1188 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
1189 * The callback should emit code to write a value to vd (which
1190 * should be written to only).
1191 */
1192typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
1193typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
1194
1195/*
1196 * Return true if the specified S reg is in a scalar bank
1197 * (ie if it is s0..s7)
1198 */
1199static inline bool vfp_sreg_is_scalar(int reg)
1200{
1201 return (reg & 0x18) == 0;
1202}
1203
1204/*
1205 * Return true if the specified D reg is in a scalar bank
1206 * (ie if it is d0..d3 or d16..d19)
1207 */
1208static inline bool vfp_dreg_is_scalar(int reg)
1209{
1210 return (reg & 0xc) == 0;
1211}
1212
1213/*
1214 * Advance the S reg number forwards by delta within its bank
1215 * (ie increment the low 3 bits but leave the rest the same)
1216 */
1217static inline int vfp_advance_sreg(int reg, int delta)
1218{
1219 return ((reg + delta) & 0x7) | (reg & ~0x7);
1220}
1221
1222/*
1223 * Advance the D reg number forwards by delta within its bank
1224 * (ie increment the low 2 bits but leave the rest the same)
1225 */
1226static inline int vfp_advance_dreg(int reg, int delta)
1227{
1228 return ((reg + delta) & 0x3) | (reg & ~0x3);
1229}
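/*
 * For instance (illustrative only): vfp_advance_sreg(7, 1) wraps back to
 * s0, staying inside the s0..s7 bank, while vfp_advance_sreg(15, 1) gives
 * s8 within the s8..s15 bank; vfp_advance_dreg(3, 1) similarly wraps to d0.
 */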
1230
1231/*
1232 * Perform a 3-operand VFP data processing instruction. fn is the
1233 * callback to do the actual operation; this function deals with the
1234 * code to handle looping around for VFP vector processing.
1235 */
1236static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
1237 int vd, int vn, int vm, bool reads_vd)
1238{
1239 uint32_t delta_m = 0;
1240 uint32_t delta_d = 0;
1241 int veclen = s->vec_len;
1242 TCGv_i32 f0, f1, fd;
1243 TCGv_ptr fpst;
1244
1245 if (!dc_isar_feature(aa32_fpshvec, s) &&
1246 (veclen != 0 || s->vec_stride != 0)) {
1247 return false;
1248 }
1249
1250 if (!vfp_access_check(s)) {
1251 return true;
1252 }
1253
1254 if (veclen > 0) {
1255 /* Figure out what type of vector operation this is. */
1256 if (vfp_sreg_is_scalar(vd)) {
1257 /* scalar */
1258 veclen = 0;
1259 } else {
1260 delta_d = s->vec_stride + 1;
1261
1262 if (vfp_sreg_is_scalar(vm)) {
1263 /* mixed scalar/vector */
1264 delta_m = 0;
1265 } else {
1266 /* vector */
1267 delta_m = delta_d;
1268 }
1269 }
1270 }
1271
1272 f0 = tcg_temp_new_i32();
1273 f1 = tcg_temp_new_i32();
1274 fd = tcg_temp_new_i32();
1275 fpst = get_fpstatus_ptr(0);
1276
1277 neon_load_reg32(f0, vn);
1278 neon_load_reg32(f1, vm);
1279
1280 for (;;) {
1281 if (reads_vd) {
1282 neon_load_reg32(fd, vd);
1283 }
1284 fn(fd, f0, f1, fpst);
1285 neon_store_reg32(fd, vd);
1286
1287 if (veclen == 0) {
1288 break;
1289 }
1290
1291 /* Set up the operands for the next iteration */
1292 veclen--;
1293 vd = vfp_advance_sreg(vd, delta_d);
1294 vn = vfp_advance_sreg(vn, delta_d);
1295 neon_load_reg32(f0, vn);
1296 if (delta_m) {
1297 vm = vfp_advance_sreg(vm, delta_m);
1298 neon_load_reg32(f1, vm);
1299 }
1300 }
1301
1302 tcg_temp_free_i32(f0);
1303 tcg_temp_free_i32(f1);
1304 tcg_temp_free_i32(fd);
1305 tcg_temp_free_ptr(fpst);
1306
1307 return true;
1308}
1309
1310static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
1311 int vd, int vn, int vm, bool reads_vd)
1312{
1313 uint32_t delta_m = 0;
1314 uint32_t delta_d = 0;
1315 int veclen = s->vec_len;
1316 TCGv_i64 f0, f1, fd;
1317 TCGv_ptr fpst;
1318
1319 /* UNDEF accesses to D16-D31 if they don't exist */
1320 if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
1321 return false;
1322 }
1323
1324 if (!dc_isar_feature(aa32_fpdp, s)) {
1325 return false;
1326 }
1327
1328 if (!dc_isar_feature(aa32_fpshvec, s) &&
1329 (veclen != 0 || s->vec_stride != 0)) {
1330 return false;
1331 }
1332
1333 if (!vfp_access_check(s)) {
1334 return true;
1335 }
1336
1337 if (veclen > 0) {
1338 /* Figure out what type of vector operation this is. */
1339 if (vfp_dreg_is_scalar(vd)) {
1340 /* scalar */
1341 veclen = 0;
1342 } else {
1343 delta_d = (s->vec_stride >> 1) + 1;
1344
1345 if (vfp_dreg_is_scalar(vm)) {
1346 /* mixed scalar/vector */
1347 delta_m = 0;
1348 } else {
1349 /* vector */
1350 delta_m = delta_d;
1351 }
1352 }
1353 }
1354
1355 f0 = tcg_temp_new_i64();
1356 f1 = tcg_temp_new_i64();
1357 fd = tcg_temp_new_i64();
1358 fpst = get_fpstatus_ptr(0);
1359
1360 neon_load_reg64(f0, vn);
1361 neon_load_reg64(f1, vm);
1362
1363 for (;;) {
1364 if (reads_vd) {
1365 neon_load_reg64(fd, vd);
1366 }
1367 fn(fd, f0, f1, fpst);
1368 neon_store_reg64(fd, vd);
1369
1370 if (veclen == 0) {
1371 break;
1372 }
1373 /* Set up the operands for the next iteration */
1374 veclen--;
1375 vd = vfp_advance_dreg(vd, delta_d);
1376 vn = vfp_advance_dreg(vn, delta_d);
1377 neon_load_reg64(f0, vn);
1378 if (delta_m) {
1379 vm = vfp_advance_dreg(vm, delta_m);
1380 neon_load_reg64(f1, vm);
1381 }
1382 }
1383
1384 tcg_temp_free_i64(f0);
1385 tcg_temp_free_i64(f1);
1386 tcg_temp_free_i64(fd);
1387 tcg_temp_free_ptr(fpst);
1388
1389 return true;
1390}
1391
1392static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
1393{
1394 uint32_t delta_m = 0;
1395 uint32_t delta_d = 0;
1396 int veclen = s->vec_len;
1397 TCGv_i32 f0, fd;
1398
1399 if (!dc_isar_feature(aa32_fpshvec, s) &&
1400 (veclen != 0 || s->vec_stride != 0)) {
1401 return false;
1402 }
1403
1404 if (!vfp_access_check(s)) {
1405 return true;
1406 }
1407
1408 if (veclen > 0) {
1409 /* Figure out what type of vector operation this is. */
1410 if (vfp_sreg_is_scalar(vd)) {
1411 /* scalar */
1412 veclen = 0;
1413 } else {
1414 delta_d = s->vec_stride + 1;
1415
1416 if (vfp_sreg_is_scalar(vm)) {
1417 /* mixed scalar/vector */
1418 delta_m = 0;
1419 } else {
1420 /* vector */
1421 delta_m = delta_d;
1422 }
1423 }
1424 }
1425
1426 f0 = tcg_temp_new_i32();
1427 fd = tcg_temp_new_i32();
1428
1429 neon_load_reg32(f0, vm);
1430
1431 for (;;) {
1432 fn(fd, f0);
1433 neon_store_reg32(fd, vd);
1434
1435 if (veclen == 0) {
1436 break;
1437 }
1438
1439 if (delta_m == 0) {
1440 /* single source one-many */
1441 while (veclen--) {
1442 vd = vfp_advance_sreg(vd, delta_d);
1443 neon_store_reg32(fd, vd);
1444 }
1445 break;
1446 }
1447
1448 /* Set up the operands for the next iteration */
1449 veclen--;
1450 vd = vfp_advance_sreg(vd, delta_d);
1451 vm = vfp_advance_sreg(vm, delta_m);
1452 neon_load_reg32(f0, vm);
1453 }
1454
1455 tcg_temp_free_i32(f0);
1456 tcg_temp_free_i32(fd);
1457
1458 return true;
1459}
1460
1461static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
1462{
1463 uint32_t delta_m = 0;
1464 uint32_t delta_d = 0;
1465 int veclen = s->vec_len;
1466 TCGv_i64 f0, fd;
1467
1468 /* UNDEF accesses to D16-D31 if they don't exist */
1469 if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
1470 return false;
1471 }
1472
1473 if (!dc_isar_feature(aa32_fpdp, s)) {
1474 return false;
1475 }
1476
1477 if (!dc_isar_feature(aa32_fpshvec, s) &&
1478 (veclen != 0 || s->vec_stride != 0)) {
1479 return false;
1480 }
1481
1482 if (!vfp_access_check(s)) {
1483 return true;
1484 }
1485
1486 if (veclen > 0) {
1487 /* Figure out what type of vector operation this is. */
1488 if (vfp_dreg_is_scalar(vd)) {
1489 /* scalar */
1490 veclen = 0;
1491 } else {
1492 delta_d = (s->vec_stride >> 1) + 1;
1493
1494 if (vfp_dreg_is_scalar(vm)) {
1495 /* mixed scalar/vector */
1496 delta_m = 0;
1497 } else {
1498 /* vector */
1499 delta_m = delta_d;
1500 }
1501 }
1502 }
1503
1504 f0 = tcg_temp_new_i64();
1505 fd = tcg_temp_new_i64();
1506
1507 neon_load_reg64(f0, vm);
1508
1509 for (;;) {
1510 fn(fd, f0);
1511 neon_store_reg64(fd, vd);
1512
1513 if (veclen == 0) {
1514 break;
1515 }
1516
1517 if (delta_m == 0) {
1518 /* single source one-many */
1519 while (veclen--) {
1520 vd = vfp_advance_dreg(vd, delta_d);
1521 neon_store_reg64(fd, vd);
1522 }
1523 break;
1524 }
1525
1526 /* Set up the operands for the next iteration */
1527 veclen--;
1528 vd = vfp_advance_dreg(vd, delta_d);
1529 vm = vfp_advance_dreg(vm, delta_m);
1530 neon_load_reg64(f0, vm);
1531 }
1532
1533 tcg_temp_free_i64(f0);
1534 tcg_temp_free_i64(fd);
1535
1536 return true;
1537}
1538
1539static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1540{
1541 /* Note that order of inputs to the add matters for NaNs */
1542 TCGv_i32 tmp = tcg_temp_new_i32();
1543
1544 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1545 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1546 tcg_temp_free_i32(tmp);
1547}
1548
1549static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
1550{
1551 return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
1552}
1553
1554static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1555{
1556 /* Note that order of inputs to the add matters for NaNs */
1557 TCGv_i64 tmp = tcg_temp_new_i64();
1558
1559 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1560 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1561 tcg_temp_free_i64(tmp);
1562}
1563
1564static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
1565{
1566 return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
1567}
1568
1569static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1570{
1571 /*
1572 * VMLS: vd = vd + -(vn * vm)
1573 * Note that order of inputs to the add matters for NaNs.
1574 */
1575 TCGv_i32 tmp = tcg_temp_new_i32();
1576
1577 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1578 gen_helper_vfp_negs(tmp, tmp);
1579 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1580 tcg_temp_free_i32(tmp);
1581}
1582
1583static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
1584{
1585 return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
1586}
1587
1588static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1589{
1590 /*
1591 * VMLS: vd = vd + -(vn * vm)
1592 * Note that order of inputs to the add matters for NaNs.
1593 */
1594 TCGv_i64 tmp = tcg_temp_new_i64();
1595
1596 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1597 gen_helper_vfp_negd(tmp, tmp);
1598 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1599 tcg_temp_free_i64(tmp);
1600}
1601
1602static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
1603{
1604 return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
1605}
1606
1607static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1608{
1609 /*
1610 * VNMLS: -fd + (fn * fm)
1611 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1612 * plausible looking simplifications because this will give wrong results
1613 * for NaNs.
1614 */
1615 TCGv_i32 tmp = tcg_temp_new_i32();
1616
1617 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1618 gen_helper_vfp_negs(vd, vd);
1619 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1620 tcg_temp_free_i32(tmp);
1621}
1622
1623static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
1624{
1625 return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
1626}
1627
1628static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1629{
1630 /*
1631 * VNMLS: -fd + (fn * fm)
1632 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1633 * plausible looking simplifications because this will give wrong results
1634 * for NaNs.
1635 */
1636 TCGv_i64 tmp = tcg_temp_new_i64();
1637
1638 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1639 gen_helper_vfp_negd(vd, vd);
1640 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1641 tcg_temp_free_i64(tmp);
1642}
1643
1644static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
1645{
1646 return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
1647}
1648
1649static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1650{
1651 /* VNMLA: -fd + -(fn * fm) */
1652 TCGv_i32 tmp = tcg_temp_new_i32();
1653
1654 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1655 gen_helper_vfp_negs(tmp, tmp);
1656 gen_helper_vfp_negs(vd, vd);
1657 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1658 tcg_temp_free_i32(tmp);
1659}
1660
1661static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
1662{
1663 return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
1664}
1665
1666static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1667{
1668 /* VNMLA: -fd + -(fn * fm) */
1669 TCGv_i64 tmp = tcg_temp_new_i64();
1670
1671 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1672 gen_helper_vfp_negd(tmp, tmp);
1673 gen_helper_vfp_negd(vd, vd);
1674 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1675 tcg_temp_free_i64(tmp);
1676}
1677
1678static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
1679{
1680 return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
1681}
1682
1683static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
1684{
1685 return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
1686}
1687
1688static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
1689{
1690 return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
1691}
1692
1693static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1694{
1695 /* VNMUL: -(fn * fm) */
1696 gen_helper_vfp_muls(vd, vn, vm, fpst);
1697 gen_helper_vfp_negs(vd, vd);
1698}
1699
1700static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
1701{
1702 return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
1703}
1704
1705static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1706{
1707 /* VNMUL: -(fn * fm) */
1708 gen_helper_vfp_muld(vd, vn, vm, fpst);
1709 gen_helper_vfp_negd(vd, vd);
1710}
1711
1712static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
1713{
1714 return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
1715}
1716
1717static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
1718{
1719 return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
1720}
1721
1722static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
1723{
1724 return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
1725}
1726
1727static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
1728{
1729 return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
1730}
1731
1732static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
1733{
1734 return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
1735}
1736
1737static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
1738{
1739 return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
1740}
1741
1742static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
1743{
1744 return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
1745}
1746
1747static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
1748{
1749 /*
1750 * VFNMA : fd = muladd(-fd, fn, fm)
1751 * VFNMS : fd = muladd(-fd, -fn, fm)
1752 * VFMA : fd = muladd( fd, fn, fm)
1753 * VFMS : fd = muladd( fd, -fn, fm)
1754 *
1755 * These are fused multiply-add, and must be done as one floating
1756 * point operation with no rounding between the multiplication and
1757 * addition steps. NB that doing the negations here as separate
1758 * steps is correct : an input NaN should come out with its sign
1759 * bit flipped if it is a negated-input.
1760 */
1761 TCGv_ptr fpst;
1762 TCGv_i32 vn, vm, vd;
1763
1764 /*
1765 * Present in VFPv4 only.
1766 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
1767 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
1768 */
1769 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
1770 (s->vec_len != 0 || s->vec_stride != 0)) {
1771 return false;
1772 }
1773
1774 if (!vfp_access_check(s)) {
1775 return true;
1776 }
1777
1778 vn = tcg_temp_new_i32();
1779 vm = tcg_temp_new_i32();
1780 vd = tcg_temp_new_i32();
1781
1782 neon_load_reg32(vn, a->vn);
1783 neon_load_reg32(vm, a->vm);
1784 if (a->o2) {
1785 /* VFNMS, VFMS */
1786 gen_helper_vfp_negs(vn, vn);
1787 }
1788 neon_load_reg32(vd, a->vd);
1789 if (a->o1 & 1) {
1790 /* VFNMA, VFNMS */
1791 gen_helper_vfp_negs(vd, vd);
1792 }
1793 fpst = get_fpstatus_ptr(0);
1794 gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
1795 neon_store_reg32(vd, a->vd);
1796
1797 tcg_temp_free_ptr(fpst);
1798 tcg_temp_free_i32(vn);
1799 tcg_temp_free_i32(vm);
1800 tcg_temp_free_i32(vd);
1801
1802 return true;
1803}
1804
1805static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
1806{
1807 /*
1808 * VFNMA : fd = muladd(-fd, fn, fm)
1809 * VFNMS : fd = muladd(-fd, -fn, fm)
1810 * VFMA : fd = muladd( fd, fn, fm)
1811 * VFMS : fd = muladd( fd, -fn, fm)
1812 *
1813 * These are fused multiply-add, and must be done as one floating
1814 * point operation with no rounding between the multiplication and
1815 * addition steps. NB that doing the negations here as separate
1816 * steps is correct : an input NaN should come out with its sign
1817 * bit flipped if it is a negated-input.
1818 */
1819 TCGv_ptr fpst;
1820 TCGv_i64 vn, vm, vd;
1821
1822 /*
1823 * Present in VFPv4 only.
1824 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
1825 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
1826 */
1827 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
1828 (s->vec_len != 0 || s->vec_stride != 0)) {
1829 return false;
1830 }
1831
1832 /* UNDEF accesses to D16-D31 if they don't exist. */
1833 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
1834 return false;
1835 }
1836
1837 if (!dc_isar_feature(aa32_fpdp, s)) {
1838 return false;
1839 }
1840
1841 if (!vfp_access_check(s)) {
1842 return true;
1843 }
1844
1845 vn = tcg_temp_new_i64();
1846 vm = tcg_temp_new_i64();
1847 vd = tcg_temp_new_i64();
1848
1849 neon_load_reg64(vn, a->vn);
1850 neon_load_reg64(vm, a->vm);
1851 if (a->o2) {
1852 /* VFNMS, VFMS */
1853 gen_helper_vfp_negd(vn, vn);
1854 }
1855 neon_load_reg64(vd, a->vd);
1856 if (a->o1 & 1) {
1857 /* VFNMA, VFNMS */
1858 gen_helper_vfp_negd(vd, vd);
1859 }
1860 fpst = get_fpstatus_ptr(0);
1861 gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
1862 neon_store_reg64(vd, a->vd);
1863
1864 tcg_temp_free_ptr(fpst);
1865 tcg_temp_free_i64(vn);
1866 tcg_temp_free_i64(vm);
1867 tcg_temp_free_i64(vd);
1868
1869 return true;
1870}
1871
1872static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
1873{
1874 uint32_t delta_d = 0;
1875 int veclen = s->vec_len;
1876 TCGv_i32 fd;
1877 uint32_t vd;
1878
1879 vd = a->vd;
1880
1881 if (!dc_isar_feature(aa32_fpshvec, s) &&
1882 (veclen != 0 || s->vec_stride != 0)) {
1883 return false;
1884 }
1885
1886 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
1887 return false;
1888 }
1889
1890 if (!vfp_access_check(s)) {
1891 return true;
1892 }
1893
1894 if (veclen > 0) {
1895 /* Figure out what type of vector operation this is. */
1896 if (vfp_sreg_is_scalar(vd)) {
1897 /* scalar */
1898 veclen = 0;
1899 } else {
1900 delta_d = s->vec_stride + 1;
1901 }
1902 }
1903
1904 fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
1905
1906 for (;;) {
1907 neon_store_reg32(fd, vd);
1908
1909 if (veclen == 0) {
1910 break;
1911 }
1912
1913 /* Set up the operands for the next iteration */
1914 veclen--;
1915 vd = vfp_advance_sreg(vd, delta_d);
1916 }
1917
1918 tcg_temp_free_i32(fd);
1919 return true;
1920}
1921
1922static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
1923{
1924 uint32_t delta_d = 0;
b518c753
PM
1925 int veclen = s->vec_len;
1926 TCGv_i64 fd;
1927 uint32_t vd;
1928
1929 vd = a->vd;
1930
1931 /* UNDEF accesses to D16-D31 if they don't exist. */
1932 if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
1933 return false;
1934 }
1935
1936 if (!dc_isar_feature(aa32_fpdp, s)) {
1937 return false;
1938 }
1939
1940 if (!dc_isar_feature(aa32_fpshvec, s) &&
1941 (veclen != 0 || s->vec_stride != 0)) {
1942 return false;
1943 }
1944
1945 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
1946 return false;
1947 }
1948
1949 if (!vfp_access_check(s)) {
1950 return true;
1951 }
1952
1953 if (veclen > 0) {
1954 /* Figure out what type of vector operation this is. */
1955 if (vfp_dreg_is_scalar(vd)) {
1956 /* scalar */
1957 veclen = 0;
1958 } else {
1959 delta_d = (s->vec_stride >> 1) + 1;
1960 }
1961 }
1962
1963 fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
1964
1965 for (;;) {
1966 neon_store_reg64(fd, vd);
1967
1968 if (veclen == 0) {
1969 break;
1970 }
1971
1972 /* Set up the operands for the next iteration */
1973 veclen--;
1974 vd = vfp_advance_dreg(vd, delta_d);
1975 }
1976
1977 tcg_temp_free_i64(fd);
1978 return true;
1979}
1980
1981static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
1982{
1983 return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
1984}
1985
1986static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
1987{
1988 return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
1989}
1990
1991static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
1992{
1993 return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
1994}
1995
1996static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
1997{
1998 return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
1999}
2000
2001static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
2002{
2003 return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
2004}
2005
2006static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
2007{
2008 return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
2009}
2010
2011static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
2012{
2013 gen_helper_vfp_sqrts(vd, vm, cpu_env);
2014}
2015
2016static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
2017{
2018 return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
2019}
2020
2021static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
2022{
2023 gen_helper_vfp_sqrtd(vd, vm, cpu_env);
2024}
2025
2026static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
2027{
2028 return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
2029}
2030
2031static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
2032{
2033 TCGv_i32 vd, vm;
2034
2035 /* Vm/M bits must be zero for the Z variant */
2036 if (a->z && a->vm != 0) {
2037 return false;
2038 }
2039
2040 if (!vfp_access_check(s)) {
2041 return true;
2042 }
2043
2044 vd = tcg_temp_new_i32();
2045 vm = tcg_temp_new_i32();
2046
2047 neon_load_reg32(vd, a->vd);
2048 if (a->z) {
2049 tcg_gen_movi_i32(vm, 0);
2050 } else {
2051 neon_load_reg32(vm, a->vm);
2052 }
2053
2054 if (a->e) {
2055 gen_helper_vfp_cmpes(vd, vm, cpu_env);
2056 } else {
2057 gen_helper_vfp_cmps(vd, vm, cpu_env);
2058 }
2059
2060 tcg_temp_free_i32(vd);
2061 tcg_temp_free_i32(vm);
2062
2063 return true;
2064}
2065
2066static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
2067{
2068 TCGv_i64 vd, vm;
2069
2070 /* Vm/M bits must be zero for the Z variant */
2071 if (a->z && a->vm != 0) {
2072 return false;
2073 }
2074
2075 /* UNDEF accesses to D16-D31 if they don't exist. */
2076 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2077 return false;
2078 }
2079
2080 if (!dc_isar_feature(aa32_fpdp, s)) {
2081 return false;
2082 }
2083
2084 if (!vfp_access_check(s)) {
2085 return true;
2086 }
2087
2088 vd = tcg_temp_new_i64();
2089 vm = tcg_temp_new_i64();
2090
2091 neon_load_reg64(vd, a->vd);
2092 if (a->z) {
2093 tcg_gen_movi_i64(vm, 0);
2094 } else {
2095 neon_load_reg64(vm, a->vm);
2096 }
2097
2098 if (a->e) {
2099 gen_helper_vfp_cmped(vd, vm, cpu_env);
2100 } else {
2101 gen_helper_vfp_cmpd(vd, vm, cpu_env);
2102 }
2103
2104 tcg_temp_free_i64(vd);
2105 tcg_temp_free_i64(vm);
2106
2107 return true;
2108}
2109
2110static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
2111{
2112 TCGv_ptr fpst;
2113 TCGv_i32 ahp_mode;
2114 TCGv_i32 tmp;
2115
2116 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2117 return false;
2118 }
2119
2120 if (!vfp_access_check(s)) {
2121 return true;
2122 }
2123
2124 fpst = get_fpstatus_ptr(false);
2125 ahp_mode = get_ahp_flag();
2126 tmp = tcg_temp_new_i32();
2127 /* The T bit tells us if we want the low or high 16 bits of Vm */
2128 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2129 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
2130 neon_store_reg32(tmp, a->vd);
2131 tcg_temp_free_i32(ahp_mode);
2132 tcg_temp_free_ptr(fpst);
2133 tcg_temp_free_i32(tmp);
2134 return true;
2135}
2136
2137static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
2138{
2139 TCGv_ptr fpst;
2140 TCGv_i32 ahp_mode;
2141 TCGv_i32 tmp;
2142 TCGv_i64 vd;
2143
2144 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2145 return false;
2146 }
2147
2148 /* UNDEF accesses to D16-D31 if they don't exist. */
2149 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2150 return false;
2151 }
2152
2153 if (!dc_isar_feature(aa32_fpdp, s)) {
2154 return false;
2155 }
2156
2157 if (!vfp_access_check(s)) {
2158 return true;
2159 }
2160
2161 fpst = get_fpstatus_ptr(false);
2162 ahp_mode = get_ahp_flag();
2163 tmp = tcg_temp_new_i32();
2164 /* The T bit tells us if we want the low or high 16 bits of Vm */
2165 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2166 vd = tcg_temp_new_i64();
2167 gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
2168 neon_store_reg64(vd, a->vd);
2169 tcg_temp_free_i32(ahp_mode);
2170 tcg_temp_free_ptr(fpst);
2171 tcg_temp_free_i32(tmp);
2172 tcg_temp_free_i64(vd);
2173 return true;
2174}
2175
2176static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
2177{
2178 TCGv_ptr fpst;
2179 TCGv_i32 ahp_mode;
2180 TCGv_i32 tmp;
2181
2182 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2183 return false;
2184 }
2185
2186 if (!vfp_access_check(s)) {
2187 return true;
2188 }
2189
2190 fpst = get_fpstatus_ptr(false);
2191 ahp_mode = get_ahp_flag();
2192 tmp = tcg_temp_new_i32();
2193
2194 neon_load_reg32(tmp, a->vm);
2195 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
2196 tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2197 tcg_temp_free_i32(ahp_mode);
2198 tcg_temp_free_ptr(fpst);
2199 tcg_temp_free_i32(tmp);
2200 return true;
2201}
2202
2203static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
2204{
2205 TCGv_ptr fpst;
2206 TCGv_i32 ahp_mode;
2207 TCGv_i32 tmp;
2208 TCGv_i64 vm;
2209
2210 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2211 return false;
2212 }
2213
2214 /* UNDEF accesses to D16-D31 if they don't exist. */
2215 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2216 return false;
2217 }
2218
2219 if (!dc_isar_feature(aa32_fpdp, s)) {
2220 return false;
2221 }
2222
2223 if (!vfp_access_check(s)) {
2224 return true;
2225 }
2226
2227 fpst = get_fpstatus_ptr(false);
2228 ahp_mode = get_ahp_flag();
2229 tmp = tcg_temp_new_i32();
2230 vm = tcg_temp_new_i64();
2231
2232 neon_load_reg64(vm, a->vm);
2233 gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
2234 tcg_temp_free_i64(vm);
2235 tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2236 tcg_temp_free_i32(ahp_mode);
2237 tcg_temp_free_ptr(fpst);
2238 tcg_temp_free_i32(tmp);
2239 return true;
2240}
2241
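/*
 * VRINT* round a floating-point value to an integral value in the same
 * floating-point format: VRINTR uses the rounding mode from FPSCR,
 * VRINTZ always rounds towards zero, and VRINTX additionally raises
 * the Inexact exception when the result differs from the input.
 */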
2242static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
2243{
2244 TCGv_ptr fpst;
2245 TCGv_i32 tmp;
2246
2247 if (!dc_isar_feature(aa32_vrint, s)) {
2248 return false;
2249 }
2250
2251 if (!vfp_access_check(s)) {
2252 return true;
2253 }
2254
2255 tmp = tcg_temp_new_i32();
2256 neon_load_reg32(tmp, a->vm);
2257 fpst = get_fpstatus_ptr(false);
2258 gen_helper_rints(tmp, tmp, fpst);
2259 neon_store_reg32(tmp, a->vd);
2260 tcg_temp_free_ptr(fpst);
2261 tcg_temp_free_i32(tmp);
2262 return true;
2263}
2264
2265static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
2266{
2267 TCGv_ptr fpst;
2268 TCGv_i64 tmp;
2269
2270 if (!dc_isar_feature(aa32_vrint, s)) {
2271 return false;
2272 }
2273
2274 /* UNDEF accesses to D16-D31 if they don't exist. */
2275 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2276 return false;
2277 }
2278
2279 if (!dc_isar_feature(aa32_fpdp, s)) {
2280 return false;
2281 }
2282
2283 if (!vfp_access_check(s)) {
2284 return true;
2285 }
2286
2287 tmp = tcg_temp_new_i64();
2288 neon_load_reg64(tmp, a->vm);
2289 fpst = get_fpstatus_ptr(false);
2290 gen_helper_rintd(tmp, tmp, fpst);
2291 neon_store_reg64(tmp, a->vd);
2292 tcg_temp_free_ptr(fpst);
2293 tcg_temp_free_i64(tmp);
2294 return true;
2295}
2296
2297static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
2298{
2299 TCGv_ptr fpst;
2300 TCGv_i32 tmp;
2301 TCGv_i32 tcg_rmode;
2302
2303 if (!dc_isar_feature(aa32_vrint, s)) {
2304 return false;
2305 }
2306
2307 if (!vfp_access_check(s)) {
2308 return true;
2309 }
2310
2311 tmp = tcg_temp_new_i32();
2312 neon_load_reg32(tmp, a->vm);
2313 fpst = get_fpstatus_ptr(false);
2314 tcg_rmode = tcg_const_i32(float_round_to_zero);
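    /*
     * gen_helper_set_rmode returns the previous rounding mode in tcg_rmode,
     * so the second call after the rounding operation restores the original
     * FPSCR mode.
     */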
2315 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2316 gen_helper_rints(tmp, tmp, fpst);
2317 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2318 neon_store_reg32(tmp, a->vd);
2319 tcg_temp_free_ptr(fpst);
2320 tcg_temp_free_i32(tcg_rmode);
2321 tcg_temp_free_i32(tmp);
2322 return true;
2323}
2324
2325static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
2326{
2327 TCGv_ptr fpst;
2328 TCGv_i64 tmp;
2329 TCGv_i32 tcg_rmode;
2330
2331 if (!dc_isar_feature(aa32_vrint, s)) {
2332 return false;
2333 }
2334
2335 /* UNDEF accesses to D16-D31 if they don't exist. */
2336 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2337 return false;
2338 }
2339
2340 if (!dc_isar_feature(aa32_fpdp, s)) {
2341 return false;
2342 }
2343
2344 if (!vfp_access_check(s)) {
2345 return true;
2346 }
2347
2348 tmp = tcg_temp_new_i64();
2349 neon_load_reg64(tmp, a->vm);
2350 fpst = get_fpstatus_ptr(false);
2351 tcg_rmode = tcg_const_i32(float_round_to_zero);
2352 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2353 gen_helper_rintd(tmp, tmp, fpst);
2354 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2355 neon_store_reg64(tmp, a->vd);
2356 tcg_temp_free_ptr(fpst);
2357 tcg_temp_free_i64(tmp);
2358 tcg_temp_free_i32(tcg_rmode);
2359 return true;
2360}
2361
2362static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
2363{
2364 TCGv_ptr fpst;
2365 TCGv_i32 tmp;
2366
2367 if (!dc_isar_feature(aa32_vrint, s)) {
2368 return false;
2369 }
2370
2371 if (!vfp_access_check(s)) {
2372 return true;
2373 }
2374
2375 tmp = tcg_temp_new_i32();
2376 neon_load_reg32(tmp, a->vm);
2377 fpst = get_fpstatus_ptr(false);
2378 gen_helper_rints_exact(tmp, tmp, fpst);
2379 neon_store_reg32(tmp, a->vd);
2380 tcg_temp_free_ptr(fpst);
2381 tcg_temp_free_i32(tmp);
2382 return true;
2383}
2384
2385static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
2386{
2387 TCGv_ptr fpst;
2388 TCGv_i64 tmp;
2389
2390 if (!dc_isar_feature(aa32_vrint, s)) {
2391 return false;
2392 }
2393
2394 /* UNDEF accesses to D16-D31 if they don't exist. */
2395 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2396 return false;
2397 }
2398
2399 if (!dc_isar_feature(aa32_fpdp, s)) {
2400 return false;
2401 }
2402
2403 if (!vfp_access_check(s)) {
2404 return true;
2405 }
2406
2407 tmp = tcg_temp_new_i64();
2408 neon_load_reg64(tmp, a->vm);
2409 fpst = get_fpstatus_ptr(false);
2410 gen_helper_rintd_exact(tmp, tmp, fpst);
2411 neon_store_reg64(tmp, a->vd);
2412 tcg_temp_free_ptr(fpst);
2413 tcg_temp_free_i64(tmp);
2414 return true;
2415}
2416
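/*
 * VCVT between single and double precision: the _sp/_dp suffix on these
 * trans functions refers to the size of the source operand, so
 * trans_VCVT_sp widens an f32 in Vm to an f64 in Vd, and trans_VCVT_dp
 * narrows an f64 to an f32.
 */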
2417static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
2418{
2419 TCGv_i64 vd;
2420 TCGv_i32 vm;
2421
2422 /* UNDEF accesses to D16-D31 if they don't exist. */
2423 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2424 return false;
2425 }
2426
2427 if (!dc_isar_feature(aa32_fpdp, s)) {
2428 return false;
2429 }
2430
2431 if (!vfp_access_check(s)) {
2432 return true;
2433 }
2434
2435 vm = tcg_temp_new_i32();
2436 vd = tcg_temp_new_i64();
2437 neon_load_reg32(vm, a->vm);
2438 gen_helper_vfp_fcvtds(vd, vm, cpu_env);
2439 neon_store_reg64(vd, a->vd);
2440 tcg_temp_free_i32(vm);
2441 tcg_temp_free_i64(vd);
2442 return true;
2443}
2444
2445static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
2446{
2447 TCGv_i64 vm;
2448 TCGv_i32 vd;
2449
2450 /* UNDEF accesses to D16-D31 if they don't exist. */
2451 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2452 return false;
2453 }
2454
2455 if (!dc_isar_feature(aa32_fpdp, s)) {
2456 return false;
2457 }
2458
2459 if (!vfp_access_check(s)) {
2460 return true;
2461 }
2462
2463 vd = tcg_temp_new_i32();
2464 vm = tcg_temp_new_i64();
2465 neon_load_reg64(vm, a->vm);
2466 gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
2467 neon_store_reg32(vd, a->vd);
2468 tcg_temp_free_i32(vd);
2469 tcg_temp_free_i64(vm);
2470 return true;
2471}
2472
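/*
 * VCVT from a 32-bit integer in Vm to a floating-point value in Vd;
 * the S bit distinguishes signed from unsigned source values.
 */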
2473static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
2474{
2475 TCGv_i32 vm;
2476 TCGv_ptr fpst;
2477
2478 if (!vfp_access_check(s)) {
2479 return true;
2480 }
2481
2482 vm = tcg_temp_new_i32();
2483 neon_load_reg32(vm, a->vm);
2484 fpst = get_fpstatus_ptr(false);
2485 if (a->s) {
2486 /* i32 -> f32 */
2487 gen_helper_vfp_sitos(vm, vm, fpst);
2488 } else {
2489 /* u32 -> f32 */
2490 gen_helper_vfp_uitos(vm, vm, fpst);
2491 }
2492 neon_store_reg32(vm, a->vd);
2493 tcg_temp_free_i32(vm);
2494 tcg_temp_free_ptr(fpst);
2495 return true;
2496}
2497
2498static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
2499{
2500 TCGv_i32 vm;
2501 TCGv_i64 vd;
2502 TCGv_ptr fpst;
2503
2504 /* UNDEF accesses to D16-D31 if they don't exist. */
2505 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2506 return false;
2507 }
2508
2509 if (!dc_isar_feature(aa32_fpdp, s)) {
2510 return false;
2511 }
2512
2513 if (!vfp_access_check(s)) {
2514 return true;
2515 }
2516
2517 vm = tcg_temp_new_i32();
2518 vd = tcg_temp_new_i64();
2519 neon_load_reg32(vm, a->vm);
2520 fpst = get_fpstatus_ptr(false);
2521 if (a->s) {
2522 /* i32 -> f64 */
2523 gen_helper_vfp_sitod(vd, vm, fpst);
2524 } else {
2525 /* u32 -> f64 */
2526 gen_helper_vfp_uitod(vd, vm, fpst);
2527 }
2528 neon_store_reg64(vd, a->vd);
2529 tcg_temp_free_i32(vm);
2530 tcg_temp_free_i64(vd);
2531 tcg_temp_free_ptr(fpst);
2532 return true;
2533}
2534
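/*
 * VJCVT: convert a double-precision value to a signed 32-bit integer
 * using JavaScript semantics, i.e. round towards zero with out-of-range
 * results wrapped modulo 2^32 rather than saturated.
 */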
2535static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
2536{
2537 TCGv_i32 vd;
2538 TCGv_i64 vm;
2539
2540 if (!dc_isar_feature(aa32_jscvt, s)) {
2541 return false;
2542 }
2543
2544 /* UNDEF accesses to D16-D31 if they don't exist. */
2545 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2546 return false;
2547 }
2548
2549 if (!dc_isar_feature(aa32_fpdp, s)) {
2550 return false;
2551 }
2552
2553 if (!vfp_access_check(s)) {
2554 return true;
2555 }
2556
2557 vm = tcg_temp_new_i64();
2558 vd = tcg_temp_new_i32();
2559 neon_load_reg64(vm, a->vm);
2560 gen_helper_vjcvt(vd, vm, cpu_env);
2561 neon_store_reg32(vd, a->vd);
2562 tcg_temp_free_i64(vm);
2563 tcg_temp_free_i32(vd);
2564 return true;
2565}
2566
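/*
 * VCVT between floating-point and fixed-point. The operation works in
 * place on Vd; frac_bits is the number of fraction bits, i.e. the
 * fixed-point operand width (16 or 32 bits, selected by the sx bit)
 * minus the encoded immediate. opc packs the op:U:sx bits: values 0..3
 * convert fixed-point to floating-point, 4..7 convert floating-point to
 * fixed-point using round-to-zero. For example, VCVT.F32.S32 Sd, Sd, #8
 * treats Vd as having 8 fraction bits, dividing the integer pattern by 2^8.
 */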
2567static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
2568{
2569 TCGv_i32 vd, shift;
2570 TCGv_ptr fpst;
2571 int frac_bits;
2572
2573 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
2574 return false;
2575 }
2576
2577 if (!vfp_access_check(s)) {
2578 return true;
2579 }
2580
2581 frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
2582
2583 vd = tcg_temp_new_i32();
2584 neon_load_reg32(vd, a->vd);
2585
2586 fpst = get_fpstatus_ptr(false);
2587 shift = tcg_const_i32(frac_bits);
2588
2589 /* Switch on op:U:sx bits */
2590 switch (a->opc) {
2591 case 0:
2592 gen_helper_vfp_shtos(vd, vd, shift, fpst);
2593 break;
2594 case 1:
2595 gen_helper_vfp_sltos(vd, vd, shift, fpst);
2596 break;
2597 case 2:
2598 gen_helper_vfp_uhtos(vd, vd, shift, fpst);
2599 break;
2600 case 3:
2601 gen_helper_vfp_ultos(vd, vd, shift, fpst);
2602 break;
2603 case 4:
2604 gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
2605 break;
2606 case 5:
2607 gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
2608 break;
2609 case 6:
2610 gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
2611 break;
2612 case 7:
2613 gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
2614 break;
2615 default:
2616 g_assert_not_reached();
2617 }
2618
2619 neon_store_reg32(vd, a->vd);
2620 tcg_temp_free_i32(vd);
2621 tcg_temp_free_i32(shift);
2622 tcg_temp_free_ptr(fpst);
2623 return true;
2624}
2625
2626static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
2627{
2628 TCGv_i64 vd;
2629 TCGv_i32 shift;
2630 TCGv_ptr fpst;
2631 int frac_bits;
2632
2633 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
2634 return false;
2635 }
2636
2637 /* UNDEF accesses to D16-D31 if they don't exist. */
2638 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2639 return false;
2640 }
2641
2642 if (!dc_isar_feature(aa32_fpdp, s)) {
2643 return false;
2644 }
2645
2646 if (!vfp_access_check(s)) {
2647 return true;
2648 }
2649
2650 frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
2651
2652 vd = tcg_temp_new_i64();
2653 neon_load_reg64(vd, a->vd);
2654
2655 fpst = get_fpstatus_ptr(false);
2656 shift = tcg_const_i32(frac_bits);
2657
2658 /* Switch on op:U:sx bits */
2659 switch (a->opc) {
2660 case 0:
2661 gen_helper_vfp_shtod(vd, vd, shift, fpst);
2662 break;
2663 case 1:
2664 gen_helper_vfp_sltod(vd, vd, shift, fpst);
2665 break;
2666 case 2:
2667 gen_helper_vfp_uhtod(vd, vd, shift, fpst);
2668 break;
2669 case 3:
2670 gen_helper_vfp_ultod(vd, vd, shift, fpst);
2671 break;
2672 case 4:
2673 gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
2674 break;
2675 case 5:
2676 gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
2677 break;
2678 case 6:
2679 gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
2680 break;
2681 case 7:
2682 gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
2683 break;
2684 default:
2685 g_assert_not_reached();
2686 }
2687
2688 neon_store_reg64(vd, a->vd);
2689 tcg_temp_free_i64(vd);
2690 tcg_temp_free_i32(shift);
2691 tcg_temp_free_ptr(fpst);
2692 return true;
2693}
2694
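/*
 * VCVT from floating-point to a 32-bit integer in Vd. The S bit selects
 * a signed or unsigned result, and the RZ bit selects round-towards-zero
 * (VCVT) instead of the current FPSCR rounding mode (VCVTR).
 */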
2695static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
2696{
2697 TCGv_i32 vm;
2698 TCGv_ptr fpst;
2699
2700 if (!vfp_access_check(s)) {
2701 return true;
2702 }
2703
2704 fpst = get_fpstatus_ptr(false);
2705 vm = tcg_temp_new_i32();
2706 neon_load_reg32(vm, a->vm);
2707
2708 if (a->s) {
2709 if (a->rz) {
2710 gen_helper_vfp_tosizs(vm, vm, fpst);
2711 } else {
2712 gen_helper_vfp_tosis(vm, vm, fpst);
2713 }
2714 } else {
2715 if (a->rz) {
2716 gen_helper_vfp_touizs(vm, vm, fpst);
2717 } else {
2718 gen_helper_vfp_touis(vm, vm, fpst);
2719 }
2720 }
2721 neon_store_reg32(vm, a->vd);
2722 tcg_temp_free_i32(vm);
2723 tcg_temp_free_ptr(fpst);
2724 return true;
2725}
2726
2727static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
2728{
2729 TCGv_i32 vd;
2730 TCGv_i64 vm;
2731 TCGv_ptr fpst;
2732
2733 /* UNDEF accesses to D16-D31 if they don't exist. */
2734 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2735 return false;
2736 }
2737
2738 if (!dc_isar_feature(aa32_fpdp, s)) {
2739 return false;
2740 }
2741
2742 if (!vfp_access_check(s)) {
2743 return true;
2744 }
2745
2746 fpst = get_fpstatus_ptr(false);
2747 vm = tcg_temp_new_i64();
2748 vd = tcg_temp_new_i32();
2749 neon_load_reg64(vm, a->vm);
2750
2751 if (a->s) {
2752 if (a->rz) {
2753 gen_helper_vfp_tosizd(vd, vm, fpst);
2754 } else {
2755 gen_helper_vfp_tosid(vd, vm, fpst);
2756 }
2757 } else {
2758 if (a->rz) {
2759 gen_helper_vfp_touizd(vd, vm, fpst);
2760 } else {
2761 gen_helper_vfp_touid(vd, vm, fpst);
2762 }
2763 }
2764 neon_store_reg32(vd, a->vd);
2765 tcg_temp_free_i32(vd);
2766 tcg_temp_free_i64(vm);
2767 tcg_temp_free_ptr(fpst);
2768 return true;
2769}