/*
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "exec/exec-all.h"
#include "tcg-op-gvec.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "exec/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;
static TCGv_i64 cpu_reg(DisasContext *s, int reg);
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
/* Function prototypes for gen_ functions calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);

/* Note that the gvec expanders operate on offsets + sizes.  */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
static inline int get_a64_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
     * if EL1, access as if EL0; otherwise access at current EL
     */
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, "PC=%016"PRIx64"  SP=%016"PRIx64"\n",
            env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            uint64_t vlo = q[0];
            uint64_t vhi = q[1];
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "%c",
                        i, vhi, vlo, (i & 1 ? '\n' : ' '));
        }
        cpu_fprintf(f, "FPCR: %08x  FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
/* Load the PC from a generic TCG variable.
 *
 * If address tagging is enabled via the TCR TBI bits, then loading
 * an address into the PC will clear out any tag in it:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * We can avoid doing this for relative-branches, because the
 * PC + offset can never overflow into the tag bits (assuming
 * that virtual addresses are less than 56 bits wide, as they
 * are currently), but we must handle it for branch-to-register.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        /* Test if NEITHER or BOTH TBI values are set.  If so, no need to
         * examine bit 55 of address, can just generate code.
         * If mixed, then test via generated code
         */
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both bits set, sign extension from bit 55 into [63:56] will
             * cover both cases
             */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither bit set, just load it as-is */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }

            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {  /* EL > 1 */
        if (s->tbi0) {
            /* Force tag byte to all zero */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            /* Load unmodified address */
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}
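/* Worked example (illustrative, not from the original source): with TBI
 * enabled for addresses with bit 55 set at EL0/EL1, loading the tagged
 * pointer 0x00AA555555555555 (bit 55 == 1) into the PC sign-extends from
 * bit 55, producing 0xFFAA555555555555; an address with bit 55 clear
 * instead has its tag byte zeroed by the AND with 0x00FFFFFFFFFFFFFF.
 */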
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.  */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_exception_bkpt_insn(DisasContext *s, int offset,
                                    uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc - offset);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}
static void free_tmp_a64(DisasContext *s)
{
    int i;

    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}
static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}
/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}
static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* We should have at some point before trying to access an FP register
 * done the necessary access check, so assert that
 * (a) we did the check and
 * (b) we didn't then just plough ahead anyway if it failed.
 * Print the instruction pattern in the abort message so we can figure
 * out what we need to fix if a user encounters this problem in the wild.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}
/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
{
    int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.zregs[n].d[0] is
     * still the low half and vfp.zregs[n].d[1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    offs += offsetof(CPUARMState, vfp.zregs[regno]);
    assert_fp_access_checked(s);
    return offs;
}
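/* Worked example (illustrative, not from the original source): for
 * element 1 of size MO_16, a little-endian host computes
 * offs = 1 * 2 = 2, while a big-endian host computes
 * offs = (16 - 2 * 2) ^ 8 = 12 ^ 8 = 4, which addresses the same
 * architectural halfword once the byte order of the two 64-bit halves
 * is taken into account.
 */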
/* Return the offset into CPUARMState of the "whole" vector register Qn.  */
static inline int vec_full_reg_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.zregs[regno]);
}
/* Return a newly allocated pointer to the vector register.  */
static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
    return ret;
}
/* Return the byte size of the "whole" vector register, VL / 8.  */
static inline int vec_full_reg_size(DisasContext *s)
{
    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
       In the meantime this is just the AdvSIMD length of 128.  */
    return 128 / 8;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}
/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}
static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
static TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}
/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}
/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}
/* Expand a 3-operand AdvSIMD vector operation using an op descriptor.  */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}
/* Expand a 3-operand + env pointer operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}
/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
/* Set NZCV as for a logical operation: NZ as per result, CV cleared.  */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
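/* A quick sanity check of the overflow rule used above (illustrative):
 * V = (result ^ t0) & ~(t0 ^ t1), taken from the sign bit.  For the 8-bit
 * analogue 0x7f + 0x01 = 0x80, result ^ t0 = 0xff and t0 ^ t1 = 0x7e, so
 * the sign bit of the masked value is set: two positive operands produced
 * a negative result, i.e. a signed overflow.
 */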
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}
/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}
/* Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
static inline bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return fp_access_check(s);
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
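/* For illustration only (a hypothetical table, not from this file):
 *
 *   static const AArch64DecodeTable example_table[] = {
 *       { 0x0e200400, 0x9f200400, disas_some_simd_group },
 *       { 0x00000000, 0x00000000, NULL }
 *   };
 *   AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *   if (fn) {
 *       fn(s, insn);
 *   }
 *
 * The all-zeroes mask on the final entry terminates the search, so a
 * non-matching insn yields NULL.
 */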
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */
/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* B Branch / BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
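/* Worked example (illustrative): by the time this insn is translated,
 * s->pc has already been advanced past it, so the encoding 0x14000000
 * ("B ." with imm26 = 0) computes addr = (s->pc - 4) + 0 * 4, i.e. a
 * branch to the instruction's own address.
 */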
/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
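/* Illustrative example (not from the original source): writing
 * R[t] = 0x60000000 sets Z (bit 30) and C (bit 29) and clears N and V.
 * Note that cpu_ZF encodes the Z flag as "value == 0", so the setcond
 * above makes cpu_ZF zero exactly when bit 30 is set; cpu_VF keeps its
 * flag in the sign bit, hence the shift of bit 28 left by 3 into bit 31.
 */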
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->base.is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}
/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * it is required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        gen_a64_set_pc(s, cpu_reg(s, rn));
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        }
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_exception_return(cpu_env);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->base.is_jmp = DISAS_JUMP;
}
/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
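/* For illustration (a hypothetical input, not from the original source):
 * the encoding 0x14000000 ("B .") has bits [31:25] = 0b0001010 = 0x0a,
 * so the switch above routes it to disas_uncond_b_imm().
 */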
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGMemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
               the entire quadword, however it must be quadword aligned.  */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (s->be_data == MO_LE) {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        } else {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 addr = cpu_reg_sp(s, rn);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 addr = cpu_reg_sp(s, rn);
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        TCGv_i32 tcg_rs = tcg_const_i32(rs);

        if (s->be_data == MO_LE) {
            gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
        } else {
            gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
        }
        tcg_temp_free_i32(tcg_rs);
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_const_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data.  */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);
        tcg_temp_free_i64(zero);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
/* Compute the ISS.SF (sixty-four bit register) indication. This logic is
 * derived from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}

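/* For example (illustrative): LDRSW (size == 2, is_signed, opc0 == 0)
 * reports SF = 1 because it writes a 64-bit Xt, while an unsigned LDRH
 * (size == 1) reports SF = 0.
 */
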
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            tcg_addr = read_cpu_reg_sp(s, rn, 1);
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            tcg_addr = read_cpu_reg_sp(s, rn, 1);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}

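/* Decode example (illustrative): LDAXR x0, [x1] has sz = 11, L = 1,
 * o1 = 0 and o0 = 1, so o2_L_o1_o0 = 0x5 and we take the LDXR/LDAXR arm
 * above, emitting an acquire barrier after the load.
 */
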
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10 -> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(tcg_addr);
}

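/* Example (illustrative): an LDR (literal) with imm19 = 2 loads from the
 * address of the instruction plus 8; since s->pc has already been advanced
 * past this insn, the target is computed as (s->pc - 4) + imm above.
 */
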
/*
 * LDNP (Load Pair - non-temporal hint)
 * LDP (Load Pair - non vector)
 * LDPSW (Load Pair Signed Word - non vector)
 * STNP (Store Pair - non-temporal hint)
 * STP (Store Pair - non vector)
 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * LDP (Load Pair of SIMD&FP)
 * STNP (Store Pair of SIMD&FP - non-temporal hint)
 * STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+---------+-------+------+------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7   |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+---+-------+---+---------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load.
             */
            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
                      false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, tcg_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}

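/* Note on writeback (illustrative): for "ldp x0, x1, [sp], #16" the
 * post-index path above has already advanced tcg_addr by (1 << size)
 * between the two accesses, so it adds offset - (1 << size) to leave SP
 * at the original address plus 16.
 */
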
/*
 * Load/store (immediate post-indexed)
 * Load/store (immediate pre-indexed)
 * Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 *       10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (is_unpriv) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
                             is_signed, is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}

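/* Example (illustrative): LDTR x0, [x1] is the idx == 2 (unprivileged)
 * form; it follows the same path as an unscaled LDR but with the EL0
 * memory index from get_a64_user_mem_index(), and never writes back.
 */
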
/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}

/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}

/* Atomic memory operations
 *
 *  31  30      27  26    24    22  21   16   15    12    10    5     0
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt  |
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * Rs: the source register for the operation
 * V: vector flag (always 0 as of v8.3)
 * A: acquire flag
 * R: release flag
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    int feature = ARM_FEATURE_V8_ATOMICS;
    TCGv_i64 tcg_rn, tcg_rs;
    AtomicThreeOpFn *fn;

    if (is_vector) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
       s->be_data | size | MO_ALIGN);
}

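/* Example (illustrative): LDCLR x2, x0, [x1] atomically computes
 * [x1] = [x1] AND NOT x2 and returns the old value in x0; inverting
 * tcg_rs above lets it share tcg_gen_atomic_fetch_and_i64 with plain
 * AND-type operations.
 */
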
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int opc = extract32(insn, 22, 2);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);

    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 0) {
            /* Load/store register (unscaled immediate)
             * Load/store immediate pre/post-indexed
             * Load/store register unprivileged
             */
            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
            return;
        }
        switch (extract32(insn, 10, 2)) {
        case 0:
            disas_ldst_atomic(s, insn, size, rt, is_vector);
            return;
        case 2:
            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
            return;
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
        return;
    }
    unallocated_encoding(s);
}

/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn;

    int ebytes = 1 << size;
    int elements = (is_q ? 128 : 64) / (8 << size);
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0: /* LD4/ST4 */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1 (4 Registers) */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1 (3 Registers) */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1 (1 Register) */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1 (2 Registers) */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int tt = (rt + r) % 32;
            int xs;
            for (xs = 0; xs < selem; xs++) {
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size);

                    /* For non-quad operations, setting a slice of the low
                     * 64 bits of the register clears the high 64 bits (in
                     * the ARM ARM pseudocode this is implicit in the fact
                     * that 'rval' is a 64 bit wide variable).
                     * For quad operations, we might still need to zero the
                     * high bits of SVE.  We optimize by noticing that we only
                     * need to do this the first time we touch a register.
                     */
                    if (e == 0 && (r == 0 || xs == selem - 1)) {
                        clear_vec_high(s, is_q, tt);
                    }
                }
                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
                tt = (tt + 1) % 32;
            }
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}

/* AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), s->be_data + scale);
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                mulconst = 0;
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            }
            tcg_temp_free_i64(tcg_tmp);
            clear_vec_high(s, is_q, rt);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}

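/* Example (illustrative): LD1R {v0.4s}, [x0] takes the replicate path
 * above: one 32-bit load, multiplied by 0x0000000100000001 to splat the
 * element across each 64-bit half of the vector, written to both halves
 * because the instruction is a quad form.
 */
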
/* Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    uint64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = sextract64(insn, 5, 19);
    offset = offset << 2 | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}

/*
 * Add/subtract (immediate)
 *
 *  31 30 29 28       24 23 22 21         10 9   5 4   0
 * +--+--+--+-----------+-----+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
 * +--+--+--+-----------+-----+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}

/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}

/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}

/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}

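/* Worked examples (illustrative):
 *  - immn = 0, imms = 0b111100, immr = 0: len = 1, so e = 2 and s = 0,
 *    giving a one-bit run replicated across 2-bit elements, i.e. the
 *    wmask 0x5555555555555555.
 *  - immn = 0, imms = 0b100111, immr = 0: e = 16 and s = 7, giving an
 *    8-bit run per 16-bit element, i.e. the wmask 0x00ff00ff00ff00ff.
 */
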
/* Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Move wide (immediate)
 *
 *  31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |     imm16      |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N, 10 -> Z, 11 -> K
 * hw: shift/16 (0,16, and sf only 32, 48)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

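/* Example (illustrative): the pair
 *     movz x0, #0x1234, lsl #16
 *     movk x0, #0x5678
 * first sets x0 = 0x12340000 and then deposits 0x5678 into bits [15:0],
 * leaving x0 = 0x12345678.
 */
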
/* Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
       to be smaller than bitsize, we'll never reference data outside the
       low 32-bits anyway. */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
           the balance of the word.  Let the deposit below insert all
           of those sign bits. */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
           any bits outside bitsize, therefore the zero-extension
           below is unneeded. */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

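/* Example (illustrative): "ubfx x0, x1, #8, #4" is UBFM with ri = 8 and
 * si = 11, so si >= ri and the fast path above emits a single
 * tcg_gen_extract_i64(x0, x1, 8, 4).
 */
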
/* Extract
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else if (rm == rn) { /* ROR */
            tcg_rm = cpu_reg(s, rm);
            if (sf) {
                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
            } else {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
                tcg_gen_rotri_i32(tmp, tmp, imm);
                tcg_gen_extu_i32_i64(tcg_rd, tmp);
                tcg_temp_free_i32(tmp);
            }
        } else {
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        }
    }
}

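/* Example (illustrative): "ror x0, x1, #8" assembles as EXTR with
 * rm == rn == x1, so the rm == rn special case above emits a rotate
 * instead of the shift/or sequence.
 */
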
/* Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

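/* Note (illustrative): for a 32-bit ASR the ext32s above first
 * sign-extends the W register into the 64-bit temp so that the 64-bit
 * arithmetic shift brings in copies of bit 31; the final ext32u then
 * re-narrows the result.
 */
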
/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23 22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 * imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}

/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+---------+------+-----+
 * |sf|op| S| 1 1 0 1 0 0 0 0        |  rm  | opcode2 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+---------+------+-----+
 *                                            [000000]
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    if (extract32(insn, 10, 6) != 0) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}

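/* Note (illustrative): SBC computes Rn + NOT(Rm) + C, so the op case
 * above only has to invert Rm and reuse the common gen_adc/gen_adc_CC
 * path shared with ADC.
 */
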
/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1 1 0 1 0 0 1 0        |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 *        [1]                                            [0]       [0]
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}

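/* Example (illustrative): for "ccmp x0, x1, #0, eq" with EQ false, T0 is
 * 1, so T1 = -1 and T2 = 0; the OR/ANDC sequences above then force the
 * flags to the immediate #0 regardless of the comparison just computed.
 */
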
/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}

/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}

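/* Example (illustrative): with mask 0x00ff00ff00ff00ff the two AND/shift
 * terms swap the bytes within each halfword, e.g. 0x1122334455667788
 * becomes 0x2211443366558877.
 */
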
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, rn, rd;

    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0: /* RBIT */
        handle_rbit(s, sf, rn, rd);
        break;
    case 1: /* REV16 */
        handle_rev16(s, sf, rn, rd);
        break;
    case 2: /* REV32 */
        handle_rev32(s, sf, rn, rd);
        break;
    case 3: /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case 4: /* CLZ */
        handle_clz(s, sf, rn, rd);
        break;
    case 5: /* CLS */
        handle_cls(s, sf, rn, rd);
        break;
    }
}

static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
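/* Note that AArch64 integer division never traps: division by zero
 * yields 0, and SDIV of INT64_MIN by -1 wraps. Those special cases live
 * in the udiv64/sdiv64 helpers rather than in generated code. The !sf
 * sign-extension above is what lets the 32-bit SDIV 0x80000000 / -1
 * come out as 0x80000000 via a plain 64-bit divide.
 */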
/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!arm_dc_feature(s, ARM_FEATURE_CRC)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_const_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }

    tcg_temp_free_i32(tcg_bytes);
}
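/* The sz field selects how many bytes feed each helper call (1 << sz),
 * i.e. sz 0..3 corresponds to CRC32B/H/W/X, or CRC32CB/H/W/X when
 * crc32c is set; the C forms use the Castagnoli polynomial instead of
 * the IEEE 802.3 one. Only the X forms take sf == 1, hence the sf/sz
 * checks above.
 */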
/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            disas_cc(s, insn); /* both imm and reg forms */
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
static void handle_fp_compare(DisasContext *s, bool is_double,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        TCGv_i32 tcg_vn, tcg_vm;

        tcg_vn = read_fp_sreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i32(0);
        } else {
            tcg_vm = read_fp_sreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
}
/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, type, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg inputs to 64 bits now.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
    read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Note that sregs write back zeros to the high bits,
       and we've already done the zero-extension.  */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        gen_helper_sqrt_f16(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = get_fpstatus_ptr(true);

        /* set_rmode returns the previous mode, so the second call
         * below restores it after the rounding operation.
         */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i32 tcg_op;
    TCGv_i32 tcg_res;

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rints(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        gen_helper_rints(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i64 tcg_op;
    TCGv_i64 tcg_res;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rintd(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    default:
        g_assert_not_reached();
    }
}
/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }
    case 0x0 ... 0x3:
    case 0x8 ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(false);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(false);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(true);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
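/* The o1/o0 negations above produce the four fused ops, given that
 * muladd(op1, op2, op3) computes op1 * op2 + op3:
 *
 *   o1 o0  insn     result
 *   0  0   FMADD    ra + rn * rm
 *   0  1   FMSUB    ra - rn * rm   (rn negated)
 *   1  0   FNMADD   -ra - rn * rm  (ra and rn negated)
 *   1  1   FNMSUB   -ra + rn * rm  (ra negated)
 */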
/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(true);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
static uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
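/* Worked example: imm8 == 0x70 (sign 0, imm8<6> set, low bits 0x30)
 * expands to 0x3f800000 for MO_32 and 0x3ff0000000000000 for MO_64,
 * i.e. the single- and double-precision encodings of 1.0 as used by
 * FMOV Sd, #1.0 and FMOV Dd, #1.0.
 */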
/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int is_double = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;

    if (is_double > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(MO_32 + is_double, imm8);

    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = get_fpstatus_ptr(type == 3);

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
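/* The scale field holds 64 - fbits, so handle_fpfpcvt() recovers the
 * fraction-bit count as 64 - scale: SCVTF Sd, Wn, #8, for instance,
 * computes Wn * 2^-8. disas_fp_int_conv() reuses the same path with
 * scale == 64, i.e. a zero shift, for plain integer conversions.
 */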
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);

    if (sbit) {
        unallocated_encoding(s);
        return;
    }

    if (opcode > 5) {
        /* FMOV */
        bool itof = opcode & 1;

        if (rmode >= 2) {
            unallocated_encoding(s);
            return;
        }

        switch (sf << 3 | type << 1 | rmode) {
        case 0x0: /* 32 bit */
        case 0xa: /* 64 bit */
        case 0xd: /* 64 bit to top half of quad */
            break;
        case 0x6: /* 16-bit float, 32-bit int */
        case 0xe: /* 16-bit float, 64-bit int */
            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                break;
            }
            /* fallthru */
        default:
            /* all other sf/type/rmode combinations are invalid */
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fmov(s, rd, rn, type, itof);
    } else {
        /* actual FP conversions */
        bool itof = extract32(opcode, 1, 1);

        if (rmode != 0 && opcode > 1) {
            unallocated_encoding(s);
            return;
        }
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                break;
            }
            /* fallthru */
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
    }
}
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}
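/* For example, with pos == 24 this computes
 * right = (right >> 24) | (left << 40), i.e. bits [87:24] of the
 * 128-bit left:right concatenation.
 */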
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
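/* In byte terms: EXT Vd.8B, Vn.8B, Vm.8B, #3 sets pos to 24 and
 * produces Vd = Vm[2]:Vm[1]:Vm[0]:Vn[7]:...:Vn[3], i.e. bytes 3..10
 * of the Vm:Vn concatenation, with Vn as the least significant half.
 */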
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2);
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* This does a table lookup: for every byte element in the input
     * we index into a table formed from up to four vector registers,
     * and then the output is the result of the lookups. Our helper
     * function does the lookup operation for a single 64 bit part of
     * the input.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
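/* The table is 16 * (len + 1) bytes wide, drawn from the consecutive
 * registers starting at Vn. Index bytes beyond the table return 0 for
 * TBL, while TBX leaves the corresponding destination byte unchanged,
 * which is why the TBX path above seeds the result from Rd instead of
 * zeroing it.
 */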
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20  16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+-----+----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 10 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+-----+----+------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
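/* Lane picture for a 4-element vector with part == 0, result lanes
 * written most significant first (n = n3..n0, m = m3..m0):
 *   ZIP1: m1 n1 m0 n0     UZP1: m2 m0 n2 n0     TRN1: m2 n2 m0 n0
 * The *2 variants (part == 1) pick the other elements of each source.
 */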
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
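/* For a 4-lane single-precision FMAXV the top-level call sees
 * vmap == 0b1111 and splits it into vmap_lo == 0b0011 and
 * vmap_hi == 0b1100, so the result is computed as
 * op(op(e0, e1), op(e2, e3)), exactly the tree Reduce() prescribes.
 */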
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
/* DUP (Element, Vector)
 *
 *   31  30 29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index = imm5 >> (size + 1);

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}
/* DUP (element, scalar)
 *  31                    21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}
/* DUP (General)
 *
 *   31  30 29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}
/* INS (Element)
 *
 *  31                    21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1 + size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);
}
/* INS (General)
 *
 *  31                    21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
}
/*
 * UMOV (General)
 * SMOV (General)
 *
 *   31  30 29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1 + size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15 14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
        return;
    }

    switch (imm4) {
    case 0:
        /* DUP (element - vector) */
        handle_simd_dupe(s, is_q, rd, rn, imm5);
        break;
    case 1:
        /* DUP (general) */
        handle_simd_dupg(s, is_q, rd, rn, imm5);
        break;
    case 3:
        if (is_q) {
            /* INS (general) */
            handle_simd_insg(s, rd, rn, imm5);
        } else {
            unallocated_encoding(s);
        }
        break;
    case 5:
    case 7:
        /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
        handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* See AdvSIMDExpandImm() in ARM ARM */
    switch (cmode_3_1) {
    case 0: /* Replicate(Zeros(24):imm8, 2) */
    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
    case 3: /* Replicate(imm8:Zeros(24), 2) */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* Replicate(Zeros(8):imm8, 4) */
    case 5: /* Replicate(imm8:Zeros(8), 4) */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        if (cmode_0) {
            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                if (o2) {
                    /* FMOV (vector, immediate) - half-precision */
                    imm = vfp_expand_imm(MO_16, abcdefgh);
                    /* now duplicate across the lanes */
                    imm = bitfield_replicate(imm, 16);
                } else {
                    imm = (abcdefgh & 0x3f) << 19;
                    if (abcdefgh & 0x80) {
                        imm |= 0x80000000;
                    }
                    if (abcdefgh & 0x40) {
                        imm |= 0x3e000000;
                    } else {
                        imm |= 0x40000000;
                    }
                    imm |= (imm << 32);
                }
            }
        }
        break;
    default:
        fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
        g_assert_not_reached();
    }

    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above. */
        tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                            vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above. */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
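/* Expansion example: MOVI Vd.4S, #0xab, LSL #16 has cmode = 0b0100, so
 * cmode_3_1 = 2 and shift = 16; the 32-bit pattern is 0xab << 16 =
 * 0x00ab0000 and bitfield_replicate(..., 32) yields
 * imm = 0x00ab000000ab0000, which is then broadcast across the whole
 * vector by tcg_gen_gvec_dup64i() above.
 */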
/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings, so we pull it into opcode here.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = get_fpstatus_ptr(size == MO_16);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
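/* The scalar pairwise ops reduce the two lowest elements of the source
 * vector into a single scalar result, e.g. ADDP Dd, Vn.2D computes
 * Vn.D[0] + Vn.D[1], which is why both reads above come from register rn
 * at element indices 0 and 1.
 */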
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
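/* The rounding step computes (src + (1 << (shift - 1))) >> shift, i.e.
 * round-half-up: for shift = 8, src = 0x80 the result is
 * (0x80 + 0x80) >> 8 = 1. For 64-bit elements the rounding addition can
 * carry out of bit 63, which is why the size == 3 case widens to 128 bits
 * via tcg_gen_add2_i64 and recombines the two halves after the shift.
 */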
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
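/* Shift-count decode example: SQSHRN Bd, Hn, #7 has immh:immb = 0001:001,
 * so immhb = 9, size = 32 - clz32(1) - 1 = 0 (byte results), esize = 8 and
 * shift = 2 * 8 - 9 = 7, i.e. the encoded value is subtracted from twice
 * the destination element size.
 */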
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar && !is_q && extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Since we use the variable-shift helpers we must
     * replicate the shift count into each element of
     * the tcg_shift value.
     */
    switch (size) {
    case 0:
        shift |= shift << 8;
        /* fall through */
    case 1:
        shift |= shift << 16;
        break;
    case 2:
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        TCGMemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
    TCGv_i32 tcg_shift = NULL;

    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    clear_vec_high(s, elements << size == 16, rd);
}
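/* fracbits gives the fixed-point interpretation of the integer input: the
 * result is int_val / 2^fracbits, e.g. with fracbits = 8 an input of
 * 0x180 (384) converts to 1.5. fracbits = 0 is a plain integer
 * conversion, which is why the MO_32/MO_16 paths above fall back to the
 * shift-less helpers in that case.
 */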
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    int size, elements, fracbits;
    int immhb = immh << 3 | immb;

    if (immh & 8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 4) {
        size = MO_32;
    } else if (immh & 2) {
        size = MO_16;
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        g_assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = (8 << is_q) >> size;
    }
    fracbits = (16 << size) - immhb;

    if (!fp_access_check(s)) {
        return;
    }

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}
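/* For the shifted conversions the fraction width is encoded as
 * (16 << size) - immhb: a 32-bit SCVTF has immh = 01xx, so immhb ranges
 * over [32, 63] and fracbits over [1, 32].
 */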
/* FCVTZS, FCVTZU - FP to fixedpoint conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier. */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_const_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_i32(tcg_rmode);
}
/* AdvSIMD scalar shift by immediate
 *  31 30 29 28         23 22  19 18 16 15    11 10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version so it works on fixed-size registers
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD scalar three different
 *  31 30 29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
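/* CMTST sets each destination element to all-ones when (a & b) != 0 and to
 * zero otherwise: e.g. 0x18 CMTST 0x10 gives all-ones, while
 * 0x0f CMTST 0xf0 gives 0. Note the vector variant clobbers 'a' with
 * zeroes to provide the second operand of the NE comparison.
 */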
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
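/* The do_cmop path relies on setcond producing 0 or 1 and on negation
 * turning 1 into the all-ones mask: e.g. CMGT with rn > rm yields
 * setcond = 1, and -1 is 0xffffffffffffffff, the architecturally
 * required "true" result.
 */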
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
/* AdvSIMD scalar three same
 *  31 30 29 28       24 23  22  21 20  16 15    11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
/* AdvSIMD scalar three same FP16
 *  31 30 29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(true);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_res = tcg_temp_new_i32();

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_i32(tcg_res);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_ptr(fpst);
}
/* AdvSIMD scalar three same extra
 *  31 30 29 28       24 23  22  21 20  16 15 14    11 10  9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    int feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_RDM;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement via setcond(test) and
         * then negating.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
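/* The integer ABS above is branch-free: tcg_rd is first set to -rn, then
 * movcond(GT, rn, 0) selects rn itself when it is positive, so e.g.
 * rn = -5 gives tcg_rd = 5 while rn = 7 keeps the original 7.
 */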
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(size == MO_16);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
            }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
/* AdvSIMD scalar two reg misc
 *  31 30 29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = get_fpstatus_ptr(false);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

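/*
 * A worked example of the accumulating shifts above (illustration
 * only): for SSRA with 32-bit lanes and shift == 4, gen_ssra32_i32()
 * computes d = d + (a >> 4) with an arithmetic shift, so a lane with
 * a = 0xffffff80 (-128) and d = 10 yields d = 10 + (-8) = 2.  Note
 * that the helpers clobber 'a', which the gvec expansion machinery
 * allows for the input temporary.
 */
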
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}

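/*
 * To illustrate the masking in the insert helpers above: for 8-bit
 * lanes with shift == 3, mask = dup_const(MO_8, 0xff >> 3) is
 * 0x1f1f...1f, so each byte of d keeps its top 3 bits (d & ~mask)
 * and takes its low 5 bits from a >> 3, which is exactly the SRI
 * "shift right and insert" semantics.
 */
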
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    static const GVecGen2i ssra_op[4] = {
        { .fni8 = gen_ssra8_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_8 },
        { .fni8 = gen_ssra16_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_16 },
        { .fni4 = gen_ssra32_i32,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_32 },
        { .fni8 = gen_ssra64_i64,
          .fniv = gen_ssra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_64 },
    };
    static const GVecGen2i usra_op[4] = {
        { .fni8 = gen_usra8_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = gen_usra16_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = gen_usra32_i32,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = gen_usra64_i64,
          .fniv = gen_usra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64 },
    };
    static const GVecGen2i sri_op[4] = {
        { .fni8 = gen_shr8_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = gen_shr16_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = gen_shr32_ins_i32,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = gen_shr64_ins_i64,
          .fniv = gen_shr_ins_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64 },
    };

    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    uint64_t round_const;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        if (is_u) {
            /* Shift count same as element size produces zero to add. */
            if (shift == 8 << size) {
                goto done;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
        } else {
            /* Shift count same as element size produces all sign to add. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
        }
        return;
    case 0x08: /* SRI */
        /* Shift count same as element size is valid but does nothing. */
        if (shift == 8 << size) {
            goto done;
        }
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
        return;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
            } else {
                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
            }
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
        }
        return;

    case 0x04: /* SRSHR / URSHR (rounding) */
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = true;
        break;
    default:
        g_assert_not_reached();
    }

    round_const = 1ULL << (shift - 1);
    tcg_round = tcg_const_i64(round_const);

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }
    tcg_temp_free_i64(tcg_round);

 done:
    clear_vec_high(s, is_q, rd);
}

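/*
 * How the immh:immb decode above works: the position of the leading
 * 1 bit in immh encodes the element size, and the remaining bits
 * encode the shift.  For example immh:immb = 0b0110:0b101 gives
 * size = 2 (32-bit lanes), immhb = 0b0110101 = 53, and for a right
 * shift the count is 2 * 32 - 53 = 11; the left-shift variants
 * below instead use immhb - esize.
 */
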
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    uint64_t mask = (1ull << sh) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask);
    tcg_gen_shli_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}

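/*
 * Mirror image of the SRI helpers: for SLI with 8-bit lanes and
 * shift == 3, mask = dup_const(MO_8, 0xff << 3) = 0xf8f8...f8, so
 * each byte of d keeps its low 3 bits and takes its top 5 bits
 * from a << 3.
 */
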
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    static const GVecGen2i shi_op[4] = {
        { .fni8 = gen_shl8_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_8 },
        { .fni8 = gen_shl16_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_shl32_ins_i32,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_shl64_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}

/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}

/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}

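/*
 * The rounding constant used above implements ARM's rounding shift:
 * with shift == 4, round_const = 1 << 3 = 8, so each element is
 * narrowed as (x + 8) >> 4, i.e. rounded to nearest with ties going
 * towards plus infinity.  RSHRN is simply SHRN with this
 * pre-addition enabled.
 */
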
/* AdvSIMD shift by immediate
 *  31 30 29 28         23 22  19 18 16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS/ FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}

static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            case 14: /* PMULL */
                assert(size == 0);
                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}

static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}

static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}

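/*
 * do_narrow_round_high_u32() rounds to nearest when taking the high
 * half: for example in = 0x0000000180000000 becomes
 * 0x0000000200000000 after adding 1 << 31, so the extracted high
 * word is 2 rather than the truncated 1.
 */
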
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}

static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
{
    /* PMULL of 64 x 64 -> 128 is an odd special case because it
     * is the only three-reg-diff instruction which produces a
     * 128-bit wide result from a single operation. However since
     * it's possible to calculate the two halves more or less
     * separately we just use two helper calls.
     */
    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
    TCGv_i64 tcg_res = tcg_temp_new_i64();

    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 0, MO_64);
    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}

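/*
 * Carry-less (polynomial) multiplication, for reference: each set
 * bit of one operand selects a shifted copy of the other, and the
 * partial products are XORed rather than added, so 0b11 * 0b11 is
 * 0b11 ^ 0b110 = 0b101.  A 64x64 carry-less product needs up to 127
 * bits, hence the separate lo/hi helper calls above.
 */
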
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u || size == 1 || size == 2) {
            unallocated_encoding(s);
            return;
        }
        if (size == 3) {
            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_pmull_64(s, is_q, rd, rn, rm);
            return;
        }
        goto is_widening;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
    is_widening:
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}

static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

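/*
 * All three helpers above are forms of the same xor trick.  For BSL,
 * where rd is the selector:
 *   rd = ((rn ^ rm) & rd) ^ rm
 * picks the rn bit wherever rd has a 1 and the rm bit wherever rd
 * has a 0, using one AND instead of the naive
 * (rn & rd) | (rm & ~rd).  BIT and BIF differ only in which
 * register supplies the selection mask.
 */
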
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    static const GVecGen3 bsl_op = {
        .fni8 = gen_bsl_i64,
        .fniv = gen_bsl_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };
    static const GVecGen3 bit_op = {
        .fni8 = gen_bit_i64,
        .fniv = gen_bit_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };
    static const GVecGen3 bif_op = {
        .fni8 = gen_bif_i64,
        .fniv = gen_bif_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };

    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR */
        if (rn == rm) { /* MOV */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        }
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;

    case 5: /* BSL bitwise select */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
        return;

    default:
        g_assert_not_reached();
    }
}

/* Pairwise op subgroup of C3.6.16.
 *
 * This is called directly or via the handle_3same_float for float pairwise
 * operations where the opcode and size are calculated differently.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
    TCGv_ptr fpst;
    int pass;

    /* Floating point operations need fpst */
    if (opcode >= 0x58) {
        fpst = get_fpstatus_ptr(false);
    } else {
        fpst = NULL;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            switch (opcode) {
            case 0x17: /* ADDP */
                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            /* The FP operations are all on single floats (32 bit) */
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            /* FP ops called directly, otherwise call now */
            if (genfn) {
                genfn(tcg_res[pass], tcg_op1, tcg_op2);
            }

            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
        clear_vec_high(s, is_q, rd);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}

/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    static const GVecGen3 cmtst_op[4] = {
        { .fni4 = gen_helper_neon_tst_u8,
          .fniv = gen_cmtst_vec,
          .vece = MO_8 },
        { .fni4 = gen_helper_neon_tst_u16,
          .fniv = gen_cmtst_vec,
          .vece = MO_16 },
        { .fni4 = gen_cmtst_i32,
          .fniv = gen_cmtst_vec,
          .vece = MO_32 },
        { .fni8 = gen_cmtst_i64,
          .fniv = gen_cmtst_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };
    static const GVecGen3 mla_op[4] = {
        { .fni4 = gen_mla8_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_8 },
        { .fni4 = gen_mla16_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_mla32_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_mla64_i64,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    static const GVecGen3 mls_op[4] = {
        { .fni4 = gen_mls8_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_8 },
        { .fni4 = gen_mls16_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_mls32_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_mls64_i64,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };

    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) { /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
            return;
        }
        break;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
        } else {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
        }
        return;
    case 0x11:
        if (!u) { /* CMTST */
            gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
            return;
        }
        /* else CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x1: /* SQADD, UQADD */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x5: /* SQSUB, UQSUB */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x8: /* SSHL, USHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xc: /* SMAX, UMAX */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }

            case 0xd: /* SMIN, UMIN */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xe: /* SABD, UABD */
            case 0xf: /* SABA, UABA */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x13: /* MUL, PMUL */
                assert(u); /* PMUL */
                assert(size == 0);
                genfn = gen_helper_neon_mul_p8;
                break;
            case 0x16: /* SQDMULH, SQRDMULH */
            {
                static NeonGenTwoOpEnvFn * const fns[2][2] = {
                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
                };
                assert(size == 1 || size == 2);
                genenvfn = fns[size - 1][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            if (opcode == 0xf) {
                /* SABA, UABA: accumulating ops */
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_add_u8,
                    gen_helper_neon_add_u16,
                    tcg_gen_add_i32,
                };

                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
                fns[size](tcg_res, tcg_op1, tcg_res);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}

/* AdvSIMD three same
 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
    {
        /* Pairwise operations */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}

/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23 22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 *
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode, fpopcode;
    int is_q, u, a, rm, rn, rd;
    int datasize, elements;
    int pass;
    TCGv_ptr fpst;
    bool pairwise = false;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    opcode = extract32(insn, 11, 3);
    u = extract32(insn, 29, 1);
    a = extract32(insn, 23, 1);
    is_q = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    fpopcode = opcode | (a << 3) | (u << 4);
    datasize = is_q ? 128 : 64;
    elements = datasize / 16;

    switch (fpopcode) {
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    }

    fpst = get_fpstatus_ptr(true);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
                        __func__, insn, fpopcode, s->pc);
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}


/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int feature, rot;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_RDM;
        break;
    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_FCMA;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x1: /* SQRDMLSH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
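
/* Illustrative sketch (an assumed reading of the encoding, nothing more):
 * for FCMLA the low two opcode bits select the rotation, and for FCADD a
 * single bit picks 90 or 270 degrees, so the 'rot' extractions above
 * amount to:
 */
static inline int sketch_fcmla_rot_degrees(int opcode)
{
    return extract32(opcode, 0, 2) * 90;        /* 0, 90, 180 or 270 */
}

static inline int sketch_fcadd_rot_degrees(int opcode)
{
    return extract32(opcode, 1, 1) ? 270 : 90;  /* opcode bit 1 selects */
}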

static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
            tcg_temp_free_i32(tcg_op);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           cpu_env);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
    }
}
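
/* Illustrative sketch: FCVTL reads the low half of the source and FCVTL2
 * the high half, so the first source element is offset by the number of
 * result elements, as the srcelt computation above assumes:
 */
static inline int sketch_fcvtl_first_elt(bool is_q, int result_elements)
{
    return is_q ? result_elements : 0;   /* FCVTL2 starts at the high half */
}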

static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
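
/* Illustrative sketch of the index arithmetic above: reversing elements
 * within each group is a permutation that XORs the element index with
 * (elements-per-group - 1), here called revmask:
 */
static inline int sketch_rev_elt_index(int i, int grp_size)
{
    int revmask = (1 << grp_size) - 1;   /* elements per group, minus one */
    return i ^ revmask;                  /* element position after reversal */
}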

static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        TCGMemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOneOpFn *genfn;
            static NeonGenOneOpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
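
/* Illustrative behavioural model (not generated code): SADDLP/UADDLP add
 * adjacent source lanes into one double-width result lane; SADALP/UADALP
 * then accumulate into the destination. For a signed 32 -> 64 bit pair:
 */
static inline int64_t sketch_saddlp_pair(int32_t lo, int32_t hi)
{
    return (int64_t)lo + (int64_t)hi;   /* widen first, then add the pair */
}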

static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);

        tcg_temp_free_i32(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
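
/* Illustrative sketch: per element, SHLL widens and then shifts left by the
 * source element width (8 << size), e.g. for byte elements:
 */
static inline uint16_t sketch_shll_elt_u8(uint8_t elt)
{
    return (uint16_t)elt << 8;   /* widen u8 -> u16, shift by the elt width */
}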

/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            need_fpstatus = true;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = get_fpstatus_ptr(false);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0xb:
        if (u) { /* NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
            return;
        }
        break;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverity claims (size == 3 && !is_q) has been eliminated
         * from all paths leading to here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            TCGCond cond;

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0xa: /* CMLT */
                    /* 32 bit integer comparison against zero, result is
                     * test ? (2^32 - 1) : 0. We implement via setcond(test)
                     * and then neg.
                     */
                    cond = TCG_COND_LT;
                do_cmop:
                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
                    tcg_gen_neg_i32(tcg_res, tcg_res);
                    break;
                case 0x8: /* CMGT, CMGE */
                    cond = u ? TCG_COND_GE : TCG_COND_GT;
                    goto do_cmop;
                case 0x9: /* CMEQ, CMLE */
                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
                    goto do_cmop;
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0xb: /* ABS, NEG */
                    if (u) {
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                    } else {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
                                            tcg_zero, tcg_op, tcg_res);
                        tcg_temp_free_i32(tcg_zero);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x8: /* CMGT, CMGE */
                case 0x9: /* CMEQ, CMLE */
                case 0xa: /* CMLT */
                {
                    static NeonGenTwoOpFn * const fns[3][2] = {
                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
                    };
                    NeonGenTwoOpFn *genfn;
                    int comp;
                    bool reverse;
                    TCGv_i32 tcg_zero = tcg_const_i32(0);

                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
                    comp = (opcode - 0x8) * 2 + u;
                    /* ...but LE, LT are implemented as reverse GE, GT */
                    reverse = (comp > 2);
                    if (reverse) {
                        comp = 4 - comp;
                    }
                    genfn = fns[comp][size];
                    if (reverse) {
                        genfn(tcg_res, tcg_zero, tcg_op);
                    } else {
                        genfn(tcg_res, tcg_op, tcg_zero);
                    }
                    tcg_temp_free_i32(tcg_zero);
                    break;
                }
                case 0xb: /* ABS, NEG */
                    if (u) {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
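
/* Illustrative sketch: the integer compare-against-zero cases above turn
 * the 0/1 result of setcond into the architecturally required
 * all-ones/all-zeroes lane by negating it:
 */
static inline uint32_t sketch_cmp_mask(bool test)
{
    return -(uint32_t)test;   /* true -> 0xffffffff, false -> 0x00000000 */
}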

/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are un-allocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
        g_assert_not_reached();
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = get_fpstatus_ptr(true);
    }

    if (need_rmode) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
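
/* Illustrative sketch: the mask/val pair quoted in the comment above can be
 * checked directly; an insn word belongs to this group when its fixed bits
 * match under the mask:
 */
static inline bool sketch_is_two_reg_misc_fp16(uint32_t insn)
{
    return (insn & 0x8f7e0c00) == 0x0e780800;
}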

/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to TCGMemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair.  */
        size <<= 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    /* Given TCGMemOp size, adjust register and indexing.  */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = get_fpstatus_ptr(is_fp16);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_reg_offset(s, rm, index, size), fpst,
                           is_q ? 16 : 8, vec_full_reg_size(s),
                           extract32(insn, 13, 2), /* rot */
                           size == MO_64
                           ? gen_helper_gvec_fcmlas_idx
                           : gen_helper_gvec_fcmlah_idx);
        tcg_temp_free_ptr(fpst);
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        tcg_temp_free_i64(tcg_idx);
        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);
        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        TCGMemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
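
/* Illustrative sketch: duplicating a 16-bit index element into both halves
 * of a 32-bit value, as tcg_gen_deposit_i32(idx, idx, idx, 16, 16) does
 * above, computes:
 */
static inline uint32_t sketch_dup16(uint32_t idx)
{
    return (idx & 0xffff) * 0x00010001u;   /* low half copied into high half */
}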

/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    TCGv_i32 tcg_decrypt;
    CryptoThreeOpIntFn *genfn;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
        || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_decrypt = tcg_const_i32(decrypt);

    genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_i32(tcg_decrypt);
}

/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoThreeOpFn *genfn;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    int feature = ARM_FEATURE_V8_SHA256;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
    case 1: /* SHA1P */
    case 2: /* SHA1M */
    case 3: /* SHA1SU0 */
        genfn = NULL;
        feature = ARM_FEATURE_V8_SHA1;
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);

    if (genfn) {
        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
    } else {
        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);

        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
                                    tcg_rm_ptr, tcg_opcode);
        tcg_temp_free_i32(tcg_opcode);
    }

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
}

/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoTwoOpFn *genfn;
    int feature;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = ARM_FEATURE_V8_SHA256;
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto three-reg SHA512
 *  31                   21 20  16 15 14 13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o =  extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = ARM_FEATURE_V8_SHA3;
            genfn = NULL;
            break;
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = ARM_FEATURE_V8_SM4;
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}
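
/* Illustrative scalar model of the open-coded RAX1 path above: each 64-bit
 * lane computes rd = rn ^ rol64(rm, 1):
 */
static inline uint64_t sketch_rax1_lane(uint64_t n, uint64_t m)
{
    return n ^ ((m << 1) | (m >> 63));   /* XOR with rm rotated left by one */
}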

/* Crypto two-reg SHA512
 *  31                                     12 11 10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    int feature;
    CryptoTwoOpFn *genfn;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = ARM_FEATURE_V8_SHA512;
        genfn = gen_helper_crypto_sha512su0;
        break;
    case 1: /* SM4E */
        feature = ARM_FEATURE_V8_SM4;
        genfn = gen_helper_crypto_sm4e;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = ARM_FEATURE_V8_SHA3;
        break;
    case 2: /* SM3SS1 */
        feature = ARM_FEATURE_V8_SM3;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}

/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
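
/* Illustrative scalar model: XAR is "XOR and rotate"; per 64-bit lane the
 * result is ror64(rn ^ rm, imm6), with a rotate by zero leaving the XOR
 * value unchanged:
 */
static inline uint64_t sketch_xar_lane(uint64_t n, uint64_t m, unsigned imm6)
{
    uint64_t x = n ^ m;

    imm6 &= 63;
    return imm6 ? (x >> imm6) | (x << (64 - imm6)) : x;
}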

/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11 10 9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    TCGv_i32 tcg_imm2, tcg_opcode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
    tcg_imm2   = tcg_const_i32(imm2);
    tcg_opcode = tcg_const_i32(opcode);

    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
                            tcg_opcode);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
    tcg_temp_free_i32(tcg_imm2);
    tcg_temp_free_i32(tcg_opcode);
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};

static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}
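
/* Illustrative sketch, assumed to mirror lookup_disas_fn() defined earlier
 * in this file: walk the pattern/mask table and return the first entry whose
 * fixed bits match the instruction word, or NULL for an unallocated insn:
 */
static inline AArch64DecodeFn *sketch_table_lookup(const AArch64DecodeTable *tb,
                                                   uint32_t insn)
{
    while (tb->disas_fn) {                      /* the NULL fn terminates */
        if ((insn & tb->mask) == tb->pattern) {
            return tb->disas_fn;
        }
        tb++;
    }
    return NULL;
}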

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
    s->insn = insn;
    s->pc += 4;

    s->fp_access_checked = false;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = arm_env_get_cpu(env);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
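    /* Editor's note on the expression above: TARGET_PAGE_MASK is a negative
     * value with the page-offset bits clear (e.g. ~0xfff for 4 KiB pages), so
     * (pc_first | TARGET_PAGE_MASK) is a negative number whose low bits are
     * pc_first's offset within its page. Negating it gives the bytes left on
     * the page, and dividing by 4 converts that to whole A64 instructions.
     * Hand-worked example (assuming 4 KiB pages): a page offset of 0xff8
     * yields -(...fff8) / 4 == 2, i.e. two instruction slots remain.
     */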
    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    tcg_clear_temp_count();
}
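
/* Editor's note: tcg_clear_temp_count() resets TCG's allocated-temporary
 * counter at the start of each TB; translator_loop_temp_check() in
 * aarch64_tr_translate_insn then flags any TCG temps still live after an
 * instruction, which would indicate a leak in one of the decode functions.
 */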
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc, 0, 0);
    dc->insn_start = tcg_last_op();
}
static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
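    /* Editor's note: BP_CPU marks breakpoints owned by the emulated CPU's
     * architectural debug logic, as opposed to breakpoints planted by the
     * gdbstub; only the former need the runtime check_breakpoints helper,
     * since whether they fire can depend on CPU state at execution time.
     */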
    if (bp->flags & BP_CPU) {
        gen_a64_set_pc_im(dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it likely won't be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        dc->pc += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                gen_exception_internal(EXCP_DEBUG);
            } else {
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(0);
            break;
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /* This is a special case because we don't want to just halt the
             * CPU if trying to debug across a WFI.
             */
            TCGv_i32 tmp = tcg_const_i32(4);
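            /* Editor's note: the constant 4 passed to the wfi helper is the
             * A64 instruction length in bytes; as far as this editor can
             * tell, the helper uses it to back the PC up over the WFI insn
             * if the wait has to trap to a higher exception level.
             */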
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
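
/* Editor's note: these hooks are consumed by the generic translator_loop(),
 * which drives TB translation by calling init_disas_context and tb_start,
 * then insn_start / breakpoint_check / translate_insn once per instruction,
 * and finally tb_stop (plus disas_log when instruction logging is enabled).
 */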
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};